system stringclasses 7 values | input stringlengths 10 94.5k | output stringlengths 14 55.5k |
|---|---|---|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define DEV 0
#define DGS_Check_Call( Statement , MsgString ) \
\
{ \
printf ( "Checking CUDA Call... \n") ; \
const cudaError_t error = Statement; \
if (error != cudaSuccess) \
{ \
printf( "Error checking CUDA Call... \n") ; \
printf( "Error: %s:%d, ", __FILE__, __LINE__) ; \
printf( "code: %d, reason: %s\n", error, cudaGetErrorString(error)); \
printf( "Call Checked with error, stopping...\n"); \
exit(1); \
} \
printf ( "Call Checked OK, region... \n"); \
}
// Kernel declaration
__global__ void sumMatrixRowsKernel(int N, double * M_d, double * sum_d);
__host__ void sumMatrixRows(int M, int N, double * M_h, double * sum_h);
// Auxiliary functions declarations
void random_vector(double *a, size_t n);
void array_zeros(double *r, size_t n);
int main(int argc, char * argv[]) {
if (argc < 2) {
printf("El programa recibe M la dimensión de la matriz cuadrada aleatoria a sumar por filas");
}
// Recieve matrix dimension as input and convert it to int
int M = atoi(argv[1]);
// Declare M_h the matrix to sum, and sum_h the array of sums.
double * M_h = (double *) aligned_alloc(64, M*M*sizeof(double));
double * sum_h = (double *) aligned_alloc(64, M*sizeof(double));
// Assign random values to M_h and zeros to sum_h
random_vector(M_h, M*M);
array_zeros(sum_h, M);
printf("sum_h before: ");
for (int i = 0; i < M; ++i) {
printf("%f",sum_h[i]);
}
printf("\n");
// Invoke the function to sum the rows of M_h matrix
sumMatrixRows(M, M, M_h, sum_h);
printf("sum_h after: ");
for (int i = 0; i < M; ++i) {
printf("%f ",sum_h[i]);
}
printf("\n");
}
void sumMatrixRows(int M, int N, double * M_h, double * sum_h) {
// Set up device
// cudaSetDevice(DEV);
DGS_Check_Call(cudaSetDevice(DEV), "cudaSetDevice"); // dev - device identifier
// Declare the size of the matrix and the size of the sum array
int size_matrix = M * N * sizeof(double), size_sum = M * sizeof(double);
// Declare the Matrix and the sum array of the device
double * M_d, * sum_d;
// Allocate memory on device
cudaMalloc((void**)&M_d, size_matrix); // allocate Matrix space on device global memory
cudaMalloc((void**)&sum_d, size_sum); // allocate sum array space on device global memory
// Initialize matrices on device
cudaMemcpy(M_d, M_h, size_matrix, cudaMemcpyHostToDevice); // Copy matrix from host to device.
cudaMemset(sum_d, 0, size_sum); // Initialize sum_d as an array of 0 and size: size_sum in device (dont need to copy just initialize it on device because it is a simpe array of zeros)
// Set the execution
dim3 gridSize(1, 1); // Grid dimension (Just 1 block in both dims for now)
dim3 blockSize(M, 1, 1); // Block dimension (Just M threads -as many as rows- in x dim for now)
// Invoke kernel
sumMatrixRowsKernel <<< gridSize, blockSize >>> (N, M_d, sum_d);
// Bring result to host
cudaMemcpy(sum_h, sum_d, size_sum, cudaMemcpyDeviceToHost); // Copy the sum_d array from device global memory to host DRAM.
// Free memory on device
cudaFree(M_d); // Free matrix space in memory on device
cudaFree(sum_d); // Free sum array space in memory on device
}
// Kernel definition
__global__ void sumMatrixRowsKernel(int N, double * M_d, double * sum_d) {
double partial_sum = 0;
int aux = threadIdx.x * N; // There are as many threads as rows in M_d matrix. aux represents the pointer pointing to the first column of this row. Each thread will represent each row.
// Sum all elements of this row (N is the number of columns)
for (int k = 0; k < N; ++k) {
partial_sum += M_d[aux+k];
}
// Assign the partial_sum to the sum_d array of sums
sum_d[threadIdx.x] = partial_sum;
}
void random_vector(double *a, size_t n) {
for (unsigned int i = 0; i < n; i++) {
a[i] = (double)rand() / (double)RAND_MAX;
}
}
void array_zeros(double *r, size_t n) {
for (unsigned int i = 0; i < n; i++) {
r[i] = 0;
}
} | .file "tmpxft_00155f73_00000000-6_sumMatrixRows.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2063:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2063:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z13random_vectorPdm
.type _Z13random_vectorPdm, @function
_Z13random_vectorPdm:
.LFB2059:
.cfi_startproc
endbr64
testq %rsi, %rsi
je .L8
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $8, %rsp
.cfi_def_cfa_offset 48
movq %rdi, %r13
movq %rsi, %r12
movl $0, %ebp
movl $0, %ebx
.L5:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2sdl %eax, %xmm0
divsd .LC0(%rip), %xmm0
movsd %xmm0, 0(%r13,%rbx,8)
addl $1, %ebp
movl %ebp, %ebx
cmpq %r12, %rbx
jb .L5
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L8:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
.cfi_restore 13
ret
.cfi_endproc
.LFE2059:
.size _Z13random_vectorPdm, .-_Z13random_vectorPdm
.globl _Z11array_zerosPdm
.type _Z11array_zerosPdm, @function
_Z11array_zerosPdm:
.LFB2060:
.cfi_startproc
endbr64
testq %rsi, %rsi
je .L11
movl $0, %edx
movl $0, %eax
.L13:
movq $0x000000000, (%rdi,%rax,8)
addl $1, %edx
movl %edx, %eax
cmpq %rsi, %rax
jb .L13
.L11:
ret
.cfi_endproc
.LFE2060:
.size _Z11array_zerosPdm, .-_Z11array_zerosPdm
.globl _Z42__device_stub__Z19sumMatrixRowsKerneliPdS_iPdS_
.type _Z42__device_stub__Z19sumMatrixRowsKerneliPdS_iPdS_, @function
_Z42__device_stub__Z19sumMatrixRowsKerneliPdS_iPdS_:
.LFB2085:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movl %edi, 28(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L19
.L15:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L20
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z19sumMatrixRowsKerneliPdS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L15
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z42__device_stub__Z19sumMatrixRowsKerneliPdS_iPdS_, .-_Z42__device_stub__Z19sumMatrixRowsKerneliPdS_iPdS_
.globl _Z19sumMatrixRowsKerneliPdS_
.type _Z19sumMatrixRowsKerneliPdS_, @function
_Z19sumMatrixRowsKerneliPdS_:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z42__device_stub__Z19sumMatrixRowsKerneliPdS_iPdS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z19sumMatrixRowsKerneliPdS_, .-_Z19sumMatrixRowsKerneliPdS_
.section .rodata.str1.1,"aMS",@progbits,1
.LC2:
.string "Checking CUDA Call... \n"
.LC3:
.string "Error checking CUDA Call... \n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC4:
.string "/home/ubuntu/Datasets/stackv2/train-structured/maxibove13/GPGPU/master/test_cuda/sumMatrixRows.cu"
.section .rodata.str1.1
.LC5:
.string "Error: %s:%d, "
.LC6:
.string "code: %d, reason: %s\n"
.section .rodata.str1.8
.align 8
.LC7:
.string "Call Checked with error, stopping...\n"
.section .rodata.str1.1
.LC8:
.string "Call Checked OK, region... \n"
.text
.globl _Z13sumMatrixRowsiiPdS_
.type _Z13sumMatrixRowsiiPdS_, @function
_Z13sumMatrixRowsiiPdS_:
.LFB2058:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $56, %rsp
.cfi_def_cfa_offset 112
movl %edi, %ebp
movl %esi, %r12d
movq %rdx, %r14
movq %rcx, %r13
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
leaq .LC2(%rip), %rsi
movl $2, %edi
call __printf_chk@PLT
movl $0, %edi
call cudaSetDevice@PLT
testl %eax, %eax
jne .L28
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %ebp, %ebx
imull %r12d, %ebx
sall $3, %ebx
movslq %ebx, %rbx
movq %rsp, %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leal 0(,%rbp,8), %r15d
movslq %r15d, %r15
leaq 8(%rsp), %rdi
movq %r15, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r14, %rsi
movq (%rsp), %rdi
call cudaMemcpy@PLT
movq %r15, %rdx
movl $0, %esi
movq 8(%rsp), %rdi
call cudaMemset@PLT
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl %ebp, 28(%rsp)
movl $1, 32(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L29
.L25:
movl $2, %ecx
movq %r15, %rdx
movq 8(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L30
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L28:
.cfi_restore_state
movl %eax, %ebx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $69, %ecx
leaq .LC4(%rip), %rdx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %ebx, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
movl %ebx, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L29:
movq 8(%rsp), %rdx
movq (%rsp), %rsi
movl %r12d, %edi
call _Z42__device_stub__Z19sumMatrixRowsKerneliPdS_iPdS_
jmp .L25
.L30:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size _Z13sumMatrixRowsiiPdS_, .-_Z13sumMatrixRowsiiPdS_
.section .rodata.str1.8
.align 8
.LC9:
.string "El programa recibe M la dimensi\303\263n de la matriz cuadrada aleatoria a sumar por filas"
.section .rodata.str1.1
.LC10:
.string "sum_h before: "
.LC11:
.string "%f"
.LC12:
.string "\n"
.LC13:
.string "sum_h after: "
.LC14:
.string "%f "
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $24, %rsp
.cfi_def_cfa_offset 80
movq %rsi, %rbx
cmpl $1, %edi
jle .L41
.L32:
movq 8(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %rbp
movl %eax, 12(%rsp)
movl %eax, %ebx
imull %eax, %ebx
movslq %ebx, %rbx
leaq 0(,%rbx,8), %rsi
movl $64, %edi
call aligned_alloc@PLT
movq %rax, %r15
movslq %ebp, %r12
leaq 0(,%r12,8), %rsi
movl $64, %edi
call aligned_alloc@PLT
movq %rax, %r14
movq %rbx, %rsi
movq %r15, %rdi
call _Z13random_vectorPdm
movq %r12, %rsi
movq %r14, %rdi
call _Z11array_zerosPdm
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
testl %ebp, %ebp
jle .L33
movq %r14, %rbx
leal -1(%rbp), %eax
leaq 8(%r14,%rax,8), %r12
movq %r14, %rbp
leaq .LC11(%rip), %r13
.L34:
movsd 0(%rbp), %xmm0
movq %r13, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $8, %rbp
cmpq %r12, %rbp
jne .L34
leaq .LC12(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %r14, %rcx
movq %r15, %rdx
movl 12(%rsp), %edi
movl %edi, %esi
call _Z13sumMatrixRowsiiPdS_
leaq .LC13(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC14(%rip), %rbp
.L35:
movsd (%rbx), %xmm0
movq %rbp, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $8, %rbx
cmpq %r12, %rbx
jne .L35
.L36:
leaq .LC12(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %eax
addq $24, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L41:
.cfi_restore_state
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L32
.L33:
leaq .LC12(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %r14, %rcx
movq %r15, %rdx
movl 12(%rsp), %edi
movl %edi, %esi
call _Z13sumMatrixRowsiiPdS_
leaq .LC13(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L36
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC15:
.string "_Z19sumMatrixRowsKerneliPdS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2088:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC15(%rip), %rdx
movq %rdx, %rcx
leaq _Z19sumMatrixRowsKerneliPdS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC0:
.long -4194304
.long 1105199103
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define DEV 0
#define DGS_Check_Call( Statement , MsgString ) \
\
{ \
printf ( "Checking CUDA Call... \n") ; \
const cudaError_t error = Statement; \
if (error != cudaSuccess) \
{ \
printf( "Error checking CUDA Call... \n") ; \
printf( "Error: %s:%d, ", __FILE__, __LINE__) ; \
printf( "code: %d, reason: %s\n", error, cudaGetErrorString(error)); \
printf( "Call Checked with error, stopping...\n"); \
exit(1); \
} \
printf ( "Call Checked OK, region... \n"); \
}
// Kernel declaration
__global__ void sumMatrixRowsKernel(int N, double * M_d, double * sum_d);
__host__ void sumMatrixRows(int M, int N, double * M_h, double * sum_h);
// Auxiliary functions declarations
void random_vector(double *a, size_t n);
void array_zeros(double *r, size_t n);
int main(int argc, char * argv[]) {
if (argc < 2) {
printf("El programa recibe M la dimensión de la matriz cuadrada aleatoria a sumar por filas");
}
// Recieve matrix dimension as input and convert it to int
int M = atoi(argv[1]);
// Declare M_h the matrix to sum, and sum_h the array of sums.
double * M_h = (double *) aligned_alloc(64, M*M*sizeof(double));
double * sum_h = (double *) aligned_alloc(64, M*sizeof(double));
// Assign random values to M_h and zeros to sum_h
random_vector(M_h, M*M);
array_zeros(sum_h, M);
printf("sum_h before: ");
for (int i = 0; i < M; ++i) {
printf("%f",sum_h[i]);
}
printf("\n");
// Invoke the function to sum the rows of M_h matrix
sumMatrixRows(M, M, M_h, sum_h);
printf("sum_h after: ");
for (int i = 0; i < M; ++i) {
printf("%f ",sum_h[i]);
}
printf("\n");
}
void sumMatrixRows(int M, int N, double * M_h, double * sum_h) {
// Set up device
// cudaSetDevice(DEV);
DGS_Check_Call(cudaSetDevice(DEV), "cudaSetDevice"); // dev - device identifier
// Declare the size of the matrix and the size of the sum array
int size_matrix = M * N * sizeof(double), size_sum = M * sizeof(double);
// Declare the Matrix and the sum array of the device
double * M_d, * sum_d;
// Allocate memory on device
cudaMalloc((void**)&M_d, size_matrix); // allocate Matrix space on device global memory
cudaMalloc((void**)&sum_d, size_sum); // allocate sum array space on device global memory
// Initialize matrices on device
cudaMemcpy(M_d, M_h, size_matrix, cudaMemcpyHostToDevice); // Copy matrix from host to device.
cudaMemset(sum_d, 0, size_sum); // Initialize sum_d as an array of 0 and size: size_sum in device (dont need to copy just initialize it on device because it is a simpe array of zeros)
// Set the execution
dim3 gridSize(1, 1); // Grid dimension (Just 1 block in both dims for now)
dim3 blockSize(M, 1, 1); // Block dimension (Just M threads -as many as rows- in x dim for now)
// Invoke kernel
sumMatrixRowsKernel <<< gridSize, blockSize >>> (N, M_d, sum_d);
// Bring result to host
cudaMemcpy(sum_h, sum_d, size_sum, cudaMemcpyDeviceToHost); // Copy the sum_d array from device global memory to host DRAM.
// Free memory on device
cudaFree(M_d); // Free matrix space in memory on device
cudaFree(sum_d); // Free sum array space in memory on device
}
// Kernel definition
__global__ void sumMatrixRowsKernel(int N, double * M_d, double * sum_d) {
double partial_sum = 0;
int aux = threadIdx.x * N; // There are as many threads as rows in M_d matrix. aux represents the pointer pointing to the first column of this row. Each thread will represent each row.
// Sum all elements of this row (N is the number of columns)
for (int k = 0; k < N; ++k) {
partial_sum += M_d[aux+k];
}
// Assign the partial_sum to the sum_d array of sums
sum_d[threadIdx.x] = partial_sum;
}
void random_vector(double *a, size_t n) {
for (unsigned int i = 0; i < n; i++) {
a[i] = (double)rand() / (double)RAND_MAX;
}
}
void array_zeros(double *r, size_t n) {
for (unsigned int i = 0; i < n; i++) {
r[i] = 0;
}
} | #include <stdio.h>
#include <stdlib.h>
#include "hip/hip_runtime.h"
#define DEV 0
#define DGS_Check_Call( Statement , MsgString ) \
\
{ \
printf ( "Checking CUDA Call... \n") ; \
const hipError_t error = Statement; \
if (error != hipSuccess) \
{ \
printf( "Error checking CUDA Call... \n") ; \
printf( "Error: %s:%d, ", __FILE__, __LINE__) ; \
printf( "code: %d, reason: %s\n", error, hipGetErrorString(error)); \
printf( "Call Checked with error, stopping...\n"); \
exit(1); \
} \
printf ( "Call Checked OK, region... \n"); \
}
// Kernel declaration
__global__ void sumMatrixRowsKernel(int N, double * M_d, double * sum_d);
__host__ void sumMatrixRows(int M, int N, double * M_h, double * sum_h);
// Auxiliary functions declarations
void random_vector(double *a, size_t n);
void array_zeros(double *r, size_t n);
int main(int argc, char * argv[]) {
if (argc < 2) {
printf("El programa recibe M la dimensión de la matriz cuadrada aleatoria a sumar por filas");
}
// Recieve matrix dimension as input and convert it to int
int M = atoi(argv[1]);
// Declare M_h the matrix to sum, and sum_h the array of sums.
double * M_h = (double *) aligned_alloc(64, M*M*sizeof(double));
double * sum_h = (double *) aligned_alloc(64, M*sizeof(double));
// Assign random values to M_h and zeros to sum_h
random_vector(M_h, M*M);
array_zeros(sum_h, M);
printf("sum_h before: ");
for (int i = 0; i < M; ++i) {
printf("%f",sum_h[i]);
}
printf("\n");
// Invoke the function to sum the rows of M_h matrix
sumMatrixRows(M, M, M_h, sum_h);
printf("sum_h after: ");
for (int i = 0; i < M; ++i) {
printf("%f ",sum_h[i]);
}
printf("\n");
}
void sumMatrixRows(int M, int N, double * M_h, double * sum_h) {
// Set up device
// cudaSetDevice(DEV);
DGS_Check_Call(hipSetDevice(DEV), "hipSetDevice"); // dev - device identifier
// Declare the size of the matrix and the size of the sum array
int size_matrix = M * N * sizeof(double), size_sum = M * sizeof(double);
// Declare the Matrix and the sum array of the device
double * M_d, * sum_d;
// Allocate memory on device
hipMalloc((void**)&M_d, size_matrix); // allocate Matrix space on device global memory
hipMalloc((void**)&sum_d, size_sum); // allocate sum array space on device global memory
// Initialize matrices on device
hipMemcpy(M_d, M_h, size_matrix, hipMemcpyHostToDevice); // Copy matrix from host to device.
hipMemset(sum_d, 0, size_sum); // Initialize sum_d as an array of 0 and size: size_sum in device (dont need to copy just initialize it on device because it is a simpe array of zeros)
// Set the execution
dim3 gridSize(1, 1); // Grid dimension (Just 1 block in both dims for now)
dim3 blockSize(M, 1, 1); // Block dimension (Just M threads -as many as rows- in x dim for now)
// Invoke kernel
sumMatrixRowsKernel <<< gridSize, blockSize >>> (N, M_d, sum_d);
// Bring result to host
hipMemcpy(sum_h, sum_d, size_sum, hipMemcpyDeviceToHost); // Copy the sum_d array from device global memory to host DRAM.
// Free memory on device
hipFree(M_d); // Free matrix space in memory on device
hipFree(sum_d); // Free sum array space in memory on device
}
// Kernel definition
__global__ void sumMatrixRowsKernel(int N, double * M_d, double * sum_d) {
double partial_sum = 0;
int aux = threadIdx.x * N; // There are as many threads as rows in M_d matrix. aux represents the pointer pointing to the first column of this row. Each thread will represent each row.
// Sum all elements of this row (N is the number of columns)
for (int k = 0; k < N; ++k) {
partial_sum += M_d[aux+k];
}
// Assign the partial_sum to the sum_d array of sums
sum_d[threadIdx.x] = partial_sum;
}
void random_vector(double *a, size_t n) {
for (unsigned int i = 0; i < n; i++) {
a[i] = (double)rand() / (double)RAND_MAX;
}
}
void array_zeros(double *r, size_t n) {
for (unsigned int i = 0; i < n; i++) {
r[i] = 0;
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <stdio.h>
#include <stdlib.h>
#include "hip/hip_runtime.h"
#define DEV 0
#define DGS_Check_Call( Statement , MsgString ) \
\
{ \
printf ( "Checking CUDA Call... \n") ; \
const hipError_t error = Statement; \
if (error != hipSuccess) \
{ \
printf( "Error checking CUDA Call... \n") ; \
printf( "Error: %s:%d, ", __FILE__, __LINE__) ; \
printf( "code: %d, reason: %s\n", error, hipGetErrorString(error)); \
printf( "Call Checked with error, stopping...\n"); \
exit(1); \
} \
printf ( "Call Checked OK, region... \n"); \
}
// Kernel declaration
__global__ void sumMatrixRowsKernel(int N, double * M_d, double * sum_d);
__host__ void sumMatrixRows(int M, int N, double * M_h, double * sum_h);
// Auxiliary functions declarations
void random_vector(double *a, size_t n);
void array_zeros(double *r, size_t n);
int main(int argc, char * argv[]) {
if (argc < 2) {
printf("El programa recibe M la dimensión de la matriz cuadrada aleatoria a sumar por filas");
}
// Recieve matrix dimension as input and convert it to int
int M = atoi(argv[1]);
// Declare M_h the matrix to sum, and sum_h the array of sums.
double * M_h = (double *) aligned_alloc(64, M*M*sizeof(double));
double * sum_h = (double *) aligned_alloc(64, M*sizeof(double));
// Assign random values to M_h and zeros to sum_h
random_vector(M_h, M*M);
array_zeros(sum_h, M);
printf("sum_h before: ");
for (int i = 0; i < M; ++i) {
printf("%f",sum_h[i]);
}
printf("\n");
// Invoke the function to sum the rows of M_h matrix
sumMatrixRows(M, M, M_h, sum_h);
printf("sum_h after: ");
for (int i = 0; i < M; ++i) {
printf("%f ",sum_h[i]);
}
printf("\n");
}
void sumMatrixRows(int M, int N, double * M_h, double * sum_h) {
// Set up device
// cudaSetDevice(DEV);
DGS_Check_Call(hipSetDevice(DEV), "hipSetDevice"); // dev - device identifier
// Declare the size of the matrix and the size of the sum array
int size_matrix = M * N * sizeof(double), size_sum = M * sizeof(double);
// Declare the Matrix and the sum array of the device
double * M_d, * sum_d;
// Allocate memory on device
hipMalloc((void**)&M_d, size_matrix); // allocate Matrix space on device global memory
hipMalloc((void**)&sum_d, size_sum); // allocate sum array space on device global memory
// Initialize matrices on device
hipMemcpy(M_d, M_h, size_matrix, hipMemcpyHostToDevice); // Copy matrix from host to device.
hipMemset(sum_d, 0, size_sum); // Initialize sum_d as an array of 0 and size: size_sum in device (dont need to copy just initialize it on device because it is a simpe array of zeros)
// Set the execution
dim3 gridSize(1, 1); // Grid dimension (Just 1 block in both dims for now)
dim3 blockSize(M, 1, 1); // Block dimension (Just M threads -as many as rows- in x dim for now)
// Invoke kernel
sumMatrixRowsKernel <<< gridSize, blockSize >>> (N, M_d, sum_d);
// Bring result to host
hipMemcpy(sum_h, sum_d, size_sum, hipMemcpyDeviceToHost); // Copy the sum_d array from device global memory to host DRAM.
// Free memory on device
hipFree(M_d); // Free matrix space in memory on device
hipFree(sum_d); // Free sum array space in memory on device
}
// Kernel definition
__global__ void sumMatrixRowsKernel(int N, double * M_d, double * sum_d) {
double partial_sum = 0;
int aux = threadIdx.x * N; // There are as many threads as rows in M_d matrix. aux represents the pointer pointing to the first column of this row. Each thread will represent each row.
// Sum all elements of this row (N is the number of columns)
for (int k = 0; k < N; ++k) {
partial_sum += M_d[aux+k];
}
// Assign the partial_sum to the sum_d array of sums
sum_d[threadIdx.x] = partial_sum;
}
void random_vector(double *a, size_t n) {
for (unsigned int i = 0; i < n; i++) {
a[i] = (double)rand() / (double)RAND_MAX;
}
}
void array_zeros(double *r, size_t n) {
for (unsigned int i = 0; i < n; i++) {
r[i] = 0;
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z19sumMatrixRowsKerneliPdS_
.globl _Z19sumMatrixRowsKerneliPdS_
.p2align 8
.type _Z19sumMatrixRowsKerneliPdS_,@function
_Z19sumMatrixRowsKerneliPdS_:
s_load_b32 s2, s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s2, 1
s_cbranch_scc1 .LBB0_3
s_load_b64 s[4:5], s[0:1], 0x8
v_mul_lo_u32 v1, v0, s2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v2, 31, v1
v_lshlrev_b64 v[3:4], 3, v[1:2]
v_mov_b32_e32 v1, 0
v_mov_b32_e32 v2, 0
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v3, vcc_lo, s4, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo
.LBB0_2:
global_load_b64 v[5:6], v[3:4], off
v_add_co_u32 v3, vcc_lo, v3, 8
v_add_co_ci_u32_e32 v4, vcc_lo, 0, v4, vcc_lo
s_add_i32 s2, s2, -1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_eq_u32 s2, 0
s_waitcnt vmcnt(0)
v_add_f64 v[1:2], v[1:2], v[5:6]
s_cbranch_scc0 .LBB0_2
s_branch .LBB0_4
.LBB0_3:
v_mov_b32_e32 v1, 0
v_mov_b32_e32 v2, 0
.LBB0_4:
s_load_b64 s[0:1], s[0:1], 0x10
v_lshlrev_b32_e32 v0, 3, v0
s_waitcnt lgkmcnt(0)
global_store_b64 v0, v[1:2], s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z19sumMatrixRowsKerneliPdS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 6
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z19sumMatrixRowsKerneliPdS_, .Lfunc_end0-_Z19sumMatrixRowsKerneliPdS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z19sumMatrixRowsKerneliPdS_
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z19sumMatrixRowsKerneliPdS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <stdio.h>
#include <stdlib.h>
#include "hip/hip_runtime.h"
#define DEV 0
#define DGS_Check_Call( Statement , MsgString ) \
\
{ \
printf ( "Checking CUDA Call... \n") ; \
const hipError_t error = Statement; \
if (error != hipSuccess) \
{ \
printf( "Error checking CUDA Call... \n") ; \
printf( "Error: %s:%d, ", __FILE__, __LINE__) ; \
printf( "code: %d, reason: %s\n", error, hipGetErrorString(error)); \
printf( "Call Checked with error, stopping...\n"); \
exit(1); \
} \
printf ( "Call Checked OK, region... \n"); \
}
// Kernel declaration
__global__ void sumMatrixRowsKernel(int N, double * M_d, double * sum_d);
__host__ void sumMatrixRows(int M, int N, double * M_h, double * sum_h);
// Auxiliary functions declarations
void random_vector(double *a, size_t n);
void array_zeros(double *r, size_t n);
int main(int argc, char * argv[]) {
if (argc < 2) {
printf("El programa recibe M la dimensión de la matriz cuadrada aleatoria a sumar por filas");
}
// Recieve matrix dimension as input and convert it to int
int M = atoi(argv[1]);
// Declare M_h the matrix to sum, and sum_h the array of sums.
double * M_h = (double *) aligned_alloc(64, M*M*sizeof(double));
double * sum_h = (double *) aligned_alloc(64, M*sizeof(double));
// Assign random values to M_h and zeros to sum_h
random_vector(M_h, M*M);
array_zeros(sum_h, M);
printf("sum_h before: ");
for (int i = 0; i < M; ++i) {
printf("%f",sum_h[i]);
}
printf("\n");
// Invoke the function to sum the rows of M_h matrix
sumMatrixRows(M, M, M_h, sum_h);
printf("sum_h after: ");
for (int i = 0; i < M; ++i) {
printf("%f ",sum_h[i]);
}
printf("\n");
}
void sumMatrixRows(int M, int N, double * M_h, double * sum_h) {
// Set up device
// cudaSetDevice(DEV);
DGS_Check_Call(hipSetDevice(DEV), "hipSetDevice"); // dev - device identifier
// Declare the size of the matrix and the size of the sum array
int size_matrix = M * N * sizeof(double), size_sum = M * sizeof(double);
// Declare the Matrix and the sum array of the device
double * M_d, * sum_d;
// Allocate memory on device
hipMalloc((void**)&M_d, size_matrix); // allocate Matrix space on device global memory
hipMalloc((void**)&sum_d, size_sum); // allocate sum array space on device global memory
// Initialize matrices on device
hipMemcpy(M_d, M_h, size_matrix, hipMemcpyHostToDevice); // Copy matrix from host to device.
hipMemset(sum_d, 0, size_sum); // Initialize sum_d as an array of 0 and size: size_sum in device (dont need to copy just initialize it on device because it is a simpe array of zeros)
// Set the execution
dim3 gridSize(1, 1); // Grid dimension (Just 1 block in both dims for now)
dim3 blockSize(M, 1, 1); // Block dimension (Just M threads -as many as rows- in x dim for now)
// Invoke kernel
sumMatrixRowsKernel <<< gridSize, blockSize >>> (N, M_d, sum_d);
// Bring result to host
hipMemcpy(sum_h, sum_d, size_sum, hipMemcpyDeviceToHost); // Copy the sum_d array from device global memory to host DRAM.
// Free memory on device
hipFree(M_d); // Free matrix space in memory on device
hipFree(sum_d); // Free sum array space in memory on device
}
// Kernel definition
__global__ void sumMatrixRowsKernel(int N, double * M_d, double * sum_d) {
double partial_sum = 0;
int aux = threadIdx.x * N; // There are as many threads as rows in M_d matrix. aux represents the pointer pointing to the first column of this row. Each thread will represent each row.
// Sum all elements of this row (N is the number of columns)
for (int k = 0; k < N; ++k) {
partial_sum += M_d[aux+k];
}
// Assign the partial_sum to the sum_d array of sums
sum_d[threadIdx.x] = partial_sum;
}
void random_vector(double *a, size_t n) {
for (unsigned int i = 0; i < n; i++) {
a[i] = (double)rand() / (double)RAND_MAX;
}
}
void array_zeros(double *r, size_t n) {
for (unsigned int i = 0; i < n; i++) {
r[i] = 0;
}
} | .text
.file "sumMatrixRows.hip"
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI0_0:
.quad 0x41dfffffffc00000 # double 2147483647
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
pushq %rax
.cfi_def_cfa_offset 64
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %rbx
cmpl $1, %edi
jg .LBB0_2
# %bb.1:
movl $.L.str, %edi
xorl %eax, %eax
callq printf
.LBB0_2:
movq 8(%rbx), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r14
movl %r14d, %r12d
imull %r12d, %r12d
leaq (,%r12,8), %rsi
movl $64, %edi
callq aligned_alloc
movq %rax, %r15
movq %r14, %r13
shlq $32, %r13
movq %r13, %rsi
sarq $29, %rsi
movl $64, %edi
callq aligned_alloc
movq %rax, %rbx
testl %r12d, %r12d
je .LBB0_5
# %bb.3: # %.lr.ph.i.preheader
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB0_4: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
callq rand
xorps %xmm0, %xmm0
cvtsi2sd %eax, %xmm0
divsd .LCPI0_0(%rip), %xmm0
movsd %xmm0, (%r15,%rbp,8)
incq %rbp
cmpq %rbp, %r12
jne .LBB0_4
.LBB0_5: # %_Z13random_vectorPdm.exit
testq %r13, %r13
je .LBB0_8
# %bb.6: # %.lr.ph.i28.preheader
movslq %r14d, %rax
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB0_7: # %.lr.ph.i28
# =>This Inner Loop Header: Depth=1
movq $0, (%rbx,%rcx,8)
incq %rcx
movl %ecx, %edx
cmpq %rdx, %rax
ja .LBB0_7
.LBB0_8: # %_Z11array_zerosPdm.exit
movl $.L.str.1, %edi
xorl %eax, %eax
callq printf
testl %r14d, %r14d
jle .LBB0_11
# %bb.9: # %.lr.ph.preheader
movl %r14d, %r12d
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB0_10: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movsd (%rbx,%r13,8), %xmm0 # xmm0 = mem[0],zero
movl $.L.str.2, %edi
movb $1, %al
callq printf
incq %r13
cmpq %r13, %r12
jne .LBB0_10
.LBB0_11: # %._crit_edge
movl $10, %edi
callq putchar@PLT
movl %r14d, %edi
movl %r14d, %esi
movq %r15, %rdx
movq %rbx, %rcx
callq _Z13sumMatrixRowsiiPdS_
movl $.L.str.4, %edi
xorl %eax, %eax
callq printf
testl %r14d, %r14d
jle .LBB0_14
# %bb.12: # %.lr.ph33.preheader
movl %r14d, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB0_13: # %.lr.ph33
# =>This Inner Loop Header: Depth=1
movsd (%rbx,%r15,8), %xmm0 # xmm0 = mem[0],zero
movl $.L.str.5, %edi
movb $1, %al
callq printf
incq %r15
cmpq %r15, %r14
jne .LBB0_13
.LBB0_14: # %._crit_edge34
movl $10, %edi
callq putchar@PLT
xorl %eax, %eax
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z13random_vectorPdm
.LCPI1_0:
.quad 0x41dfffffffc00000 # double 2147483647
.text
.globl _Z13random_vectorPdm
.p2align 4, 0x90
.type _Z13random_vectorPdm,@function
_Z13random_vectorPdm: # @_Z13random_vectorPdm
.cfi_startproc
# %bb.0:
testq %rsi, %rsi
je .LBB1_4
# %bb.1: # %.lr.ph.preheader
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
pushq %rax
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %rbx
movq %rdi, %r14
movl $1, %ebp
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
xorps %xmm0, %xmm0
cvtsi2sd %eax, %xmm0
divsd .LCPI1_0(%rip), %xmm0
movsd %xmm0, (%r14,%r15,8)
movl %ebp, %r15d
incl %ebp
cmpq %rbx, %r15
jb .LBB1_2
# %bb.3:
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.cfi_restore %r15
.cfi_restore %rbp
.LBB1_4: # %._crit_edge
retq
.Lfunc_end1:
.size _Z13random_vectorPdm, .Lfunc_end1-_Z13random_vectorPdm
.cfi_endproc
# -- End function
.globl _Z11array_zerosPdm # -- Begin function _Z11array_zerosPdm
.p2align 4, 0x90
.type _Z11array_zerosPdm,@function
_Z11array_zerosPdm: # @_Z11array_zerosPdm
.cfi_startproc
# %bb.0:
testq %rsi, %rsi
je .LBB2_3
# %bb.1: # %.lr.ph.preheader
xorl %eax, %eax
.p2align 4, 0x90
.LBB2_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movq $0, (%rdi,%rax,8)
incq %rax
movl %eax, %ecx
cmpq %rsi, %rcx
jb .LBB2_2
.LBB2_3: # %._crit_edge
retq
.Lfunc_end2:
.size _Z11array_zerosPdm, .Lfunc_end2-_Z11array_zerosPdm
.cfi_endproc
# -- End function
.globl _Z13sumMatrixRowsiiPdS_ # -- Begin function _Z13sumMatrixRowsiiPdS_
.p2align 4, 0x90
.type _Z13sumMatrixRowsiiPdS_,@function
_Z13sumMatrixRowsiiPdS_: # @_Z13sumMatrixRowsiiPdS_
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $120, %rsp
.cfi_def_cfa_offset 176
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rcx, %rbx
movq %rdx, %r12
movl %esi, %ebp
movl %edi, %r15d
movl $.Lstr, %edi
callq puts@PLT
xorl %edi, %edi
callq hipSetDevice
testl %eax, %eax
jne .LBB3_4
# %bb.1:
movl $.Lstr.1, %edi
callq puts@PLT
leal (,%r15,8), %r14d
movl %r14d, %eax
imull %ebp, %eax
movslq %eax, %r13
leaq 16(%rsp), %rdi
movq %r13, %rsi
callq hipMalloc
movslq %r14d, %r14
leaq 8(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
movq 16(%rsp), %rdi
movq %r12, %rsi
movq %r13, %rdx
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
xorl %esi, %esi
movq %r14, %rdx
callq hipMemset
movl %r15d, %edx
movabsq $4294967296, %rdi # imm = 0x100000000
orq %rdi, %rdx
orq $1, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_3
# %bb.2:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movl %ebp, 28(%rsp)
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 88(%rsp), %rax
movq %rax, 104(%rsp)
leaq 80(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z19sumMatrixRowsKerneliPdS_, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_3:
movq 8(%rsp), %rsi
movq %rbx, %rdi
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
addq $120, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB3_4:
.cfi_def_cfa_offset 176
movl $.Lstr.2, %edi
movl %eax, %ebx
callq puts@PLT
movl $.L.str.8, %edi
movl $.L.str.9, %esi
movl $69, %edx
xorl %eax, %eax
callq printf
movl %ebx, %edi
callq hipGetErrorString
movl $.L.str.10, %edi
movl %ebx, %esi
movq %rax, %rdx
xorl %eax, %eax
callq printf
movl $.Lstr.3, %edi
callq puts@PLT
movl $1, %edi
callq exit
.Lfunc_end3:
.size _Z13sumMatrixRowsiiPdS_, .Lfunc_end3-_Z13sumMatrixRowsiiPdS_
.cfi_endproc
# -- End function
.globl _Z34__device_stub__sumMatrixRowsKerneliPdS_ # -- Begin function _Z34__device_stub__sumMatrixRowsKerneliPdS_
.p2align 4, 0x90
.type _Z34__device_stub__sumMatrixRowsKerneliPdS_,@function
_Z34__device_stub__sumMatrixRowsKerneliPdS_: # @_Z34__device_stub__sumMatrixRowsKerneliPdS_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movl %edi, 12(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 72(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z19sumMatrixRowsKerneliPdS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end4:
.size _Z34__device_stub__sumMatrixRowsKerneliPdS_, .Lfunc_end4-_Z34__device_stub__sumMatrixRowsKerneliPdS_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB5_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB5_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z19sumMatrixRowsKerneliPdS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end5:
.size __hip_module_ctor, .Lfunc_end5-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB6_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB6_2:
retq
.Lfunc_end6:
.size __hip_module_dtor, .Lfunc_end6-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "El programa recibe M la dimensi\303\263n de la matriz cuadrada aleatoria a sumar por filas"
.size .L.str, 85
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "sum_h before: "
.size .L.str.1, 15
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "%f"
.size .L.str.2, 3
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "sum_h after: "
.size .L.str.4, 14
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "%f "
.size .L.str.5, 4
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "Error: %s:%d, "
.size .L.str.8, 15
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/maxibove13/GPGPU/master/test_cuda/sumMatrixRows.hip"
.size .L.str.9, 109
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "code: %d, reason: %s\n"
.size .L.str.10, 22
.type _Z19sumMatrixRowsKerneliPdS_,@object # @_Z19sumMatrixRowsKerneliPdS_
.section .rodata,"a",@progbits
.globl _Z19sumMatrixRowsKerneliPdS_
.p2align 3, 0x0
_Z19sumMatrixRowsKerneliPdS_:
.quad _Z34__device_stub__sumMatrixRowsKerneliPdS_
.size _Z19sumMatrixRowsKerneliPdS_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z19sumMatrixRowsKerneliPdS_"
.size .L__unnamed_1, 29
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Checking CUDA Call... "
.size .Lstr, 23
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "Call Checked OK, region... "
.size .Lstr.1, 28
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "Error checking CUDA Call... "
.size .Lstr.2, 29
.type .Lstr.3,@object # @str.3
.Lstr.3:
.asciz "Call Checked with error, stopping..."
.size .Lstr.3, 37
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z34__device_stub__sumMatrixRowsKerneliPdS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z19sumMatrixRowsKerneliPdS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z19sumMatrixRowsKerneliPdS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e220000002100 */
/*0020*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x160] ; /* 0x00005800ff027624 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe20000000a00 */
/*0040*/ CS2R R10, SRZ ; /* 0x00000000000a7805 */
/* 0x000fc6000001ff00 */
/*0050*/ ISETP.GE.AND P0, PT, R2, 0x1, PT ; /* 0x000000010200780c */
/* 0x000fda0003f06270 */
/*0060*/ @!P0 BRA 0x790 ; /* 0x0000072000008947 */
/* 0x000fea0003800000 */
/*0070*/ IADD3 R3, R2.reuse, -0x1, RZ ; /* 0xffffffff02037810 */
/* 0x040fe20007ffe0ff */
/*0080*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */
/* 0x000fe20008000000 */
/*0090*/ LOP3.LUT R2, R2, 0x3, RZ, 0xc0, !PT ; /* 0x0000000302027812 */
/* 0x000fe200078ec0ff */
/*00a0*/ CS2R R10, SRZ ; /* 0x00000000000a7805 */
/* 0x000fe2000001ff00 */
/*00b0*/ ISETP.GE.U32.AND P0, PT, R3, 0x3, PT ; /* 0x000000030300780c */
/* 0x000fe20003f06070 */
/*00c0*/ IMAD R3, R0, c[0x0][0x160], RZ ; /* 0x0000580000037a24 */
/* 0x001fd800078e02ff */
/*00d0*/ @!P0 BRA 0x690 ; /* 0x000005b000008947 */
/* 0x000fea0003800000 */
/*00e0*/ IADD3 R26, -R2, c[0x0][0x160], RZ ; /* 0x00005800021a7a10 */
/* 0x000fe20007ffe1ff */
/*00f0*/ IMAD.MOV.U32 R4, RZ, RZ, 0x8 ; /* 0x00000008ff047424 */
/* 0x000fe200078e00ff */
/*0100*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */
/* 0x000fe20008000000 */
/*0110*/ CS2R R10, SRZ ; /* 0x00000000000a7805 */
/* 0x000fe2000001ff00 */
/*0120*/ ISETP.GT.AND P0, PT, R26, RZ, PT ; /* 0x000000ff1a00720c */
/* 0x000fe20003f04270 */
/*0130*/ IMAD.WIDE R4, R3, R4, c[0x0][0x168] ; /* 0x00005a0003047625 */
/* 0x000fd800078e0204 */
/*0140*/ @!P0 BRA 0x590 ; /* 0x0000044000008947 */
/* 0x000fea0003800000 */
/*0150*/ ISETP.GT.AND P1, PT, R26, 0xc, PT ; /* 0x0000000c1a00780c */
/* 0x000fe40003f24270 */
/*0160*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fd60003f0f070 */
/*0170*/ @!P1 BRA 0x3f0 ; /* 0x0000027000009947 */
/* 0x000fea0003800000 */
/*0180*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0190*/ LDG.E.64 R12, [R4.64] ; /* 0x00000006040c7981 */
/* 0x000ea8000c1e1b00 */
/*01a0*/ LDG.E.64 R14, [R4.64+0x8] ; /* 0x00000806040e7981 */
/* 0x000ee8000c1e1b00 */
/*01b0*/ LDG.E.64 R16, [R4.64+0x10] ; /* 0x0000100604107981 */
/* 0x000f28000c1e1b00 */
/*01c0*/ LDG.E.64 R18, [R4.64+0x18] ; /* 0x0000180604127981 */
/* 0x000f68000c1e1b00 */
/*01d0*/ LDG.E.64 R20, [R4.64+0x20] ; /* 0x0000200604147981 */
/* 0x000f68000c1e1b00 */
/*01e0*/ LDG.E.64 R24, [R4.64+0x28] ; /* 0x0000280604187981 */
/* 0x001f68000c1e1b00 */
/*01f0*/ LDG.E.64 R22, [R4.64+0x30] ; /* 0x0000300604167981 */
/* 0x000f68000c1e1b00 */
/*0200*/ LDG.E.64 R6, [R4.64+0x38] ; /* 0x0000380604067981 */
/* 0x000f68000c1e1b00 */
/*0210*/ LDG.E.64 R8, [R4.64+0x40] ; /* 0x0000400604087981 */
/* 0x000f62000c1e1b00 */
/*0220*/ DADD R12, R12, R10 ; /* 0x000000000c0c7229 */
/* 0x0060c6000000000a */
/*0230*/ LDG.E.64 R10, [R4.64+0x48] ; /* 0x00004806040a7981 */
/* 0x0010a6000c1e1b00 */
/*0240*/ DADD R14, R12, R14 ; /* 0x000000000c0e7229 */
/* 0x008308000000000e */
/*0250*/ LDG.E.64 R12, [R4.64+0x50] ; /* 0x00005006040c7981 */
/* 0x0020e4000c1e1b00 */
/*0260*/ DADD R16, R14, R16 ; /* 0x000000000e107229 */
/* 0x0103480000000010 */
/*0270*/ LDG.E.64 R14, [R4.64+0x58] ; /* 0x00005806040e7981 */
/* 0x002124000c1e1b00 */
/*0280*/ DADD R18, R16, R18 ; /* 0x0000000010127229 */
/* 0x0203480000000012 */
/*0290*/ LDG.E.64 R16, [R4.64+0x60] ; /* 0x0000600604107981 */
/* 0x002124000c1e1b00 */
/*02a0*/ DADD R20, R18, R20 ; /* 0x0000000012147229 */
/* 0x0203480000000014 */
/*02b0*/ LDG.E.64 R18, [R4.64+0x68] ; /* 0x0000680604127981 */
/* 0x002124000c1e1b00 */
/*02c0*/ DADD R24, R20, R24 ; /* 0x0000000014187229 */
/* 0x0203480000000018 */
/*02d0*/ LDG.E.64 R20, [R4.64+0x70] ; /* 0x0000700604147981 */
/* 0x002124000c1e1b00 */
/*02e0*/ DADD R22, R24, R22 ; /* 0x0000000018167229 */
/* 0x0203480000000016 */
/*02f0*/ LDG.E.64 R24, [R4.64+0x78] ; /* 0x0000780604187981 */
/* 0x002122000c1e1b00 */
/*0300*/ IADD3 R26, R26, -0x10, RZ ; /* 0xfffffff01a1a7810 */
/* 0x000fe20007ffe0ff */
/*0310*/ DADD R6, R22, R6 ; /* 0x0000000016067229 */
/* 0x020e620000000006 */
/*0320*/ UIADD3 UR4, UR4, 0x10, URZ ; /* 0x0000001004047890 */
/* 0x000fe4000fffe03f */
/*0330*/ ISETP.GT.AND P1, PT, R26, 0xc, PT ; /* 0x0000000c1a00780c */
/* 0x000fc60003f24270 */
/*0340*/ DADD R6, R6, R8 ; /* 0x0000000006067229 */
/* 0x002ea20000000008 */
/*0350*/ IADD3 R4, P2, R4, 0x80, RZ ; /* 0x0000008004047810 */
/* 0x001fca0007f5e0ff */
/*0360*/ IMAD.X R5, RZ, RZ, R5, P2 ; /* 0x000000ffff057224 */
/* 0x000fe200010e0605 */
/*0370*/ DADD R6, R6, R10 ; /* 0x0000000006067229 */
/* 0x004ecc000000000a */
/*0380*/ DADD R6, R6, R12 ; /* 0x0000000006067229 */
/* 0x008f0c000000000c */
/*0390*/ DADD R6, R6, R14 ; /* 0x0000000006067229 */
/* 0x010e0c000000000e */
/*03a0*/ DADD R6, R6, R16 ; /* 0x0000000006067229 */
/* 0x001e0c0000000010 */
/*03b0*/ DADD R6, R6, R18 ; /* 0x0000000006067229 */
/* 0x001e0c0000000012 */
/*03c0*/ DADD R6, R6, R20 ; /* 0x0000000006067229 */
/* 0x001e0c0000000014 */
/*03d0*/ DADD R10, R6, R24 ; /* 0x00000000060a7229 */
/* 0x0010620000000018 */
/*03e0*/ @P1 BRA 0x190 ; /* 0xfffffda000001947 */
/* 0x000fea000383ffff */
/*03f0*/ ISETP.GT.AND P1, PT, R26, 0x4, PT ; /* 0x000000041a00780c */
/* 0x000fda0003f24270 */
/*0400*/ @!P1 BRA 0x570 ; /* 0x0000016000009947 */
/* 0x000fea0003800000 */
/*0410*/ LDG.E.64 R22, [R4.64] ; /* 0x0000000604167981 */
/* 0x000ea8000c1e1b00 */
/*0420*/ LDG.E.64 R20, [R4.64+0x8] ; /* 0x0000080604147981 */
/* 0x000ee8000c1e1b00 */
/*0430*/ LDG.E.64 R18, [R4.64+0x10] ; /* 0x0000100604127981 */
/* 0x000f28000c1e1b00 */
/*0440*/ LDG.E.64 R16, [R4.64+0x18] ; /* 0x0000180604107981 */
/* 0x000f68000c1e1b00 */
/*0450*/ LDG.E.64 R14, [R4.64+0x20] ; /* 0x00002006040e7981 */
/* 0x000f68000c1e1b00 */
/*0460*/ LDG.E.64 R12, [R4.64+0x28] ; /* 0x00002806040c7981 */
/* 0x000f68000c1e1b00 */
/*0470*/ LDG.E.64 R8, [R4.64+0x30] ; /* 0x0000300604087981 */
/* 0x000f68000c1e1b00 */
/*0480*/ LDG.E.64 R6, [R4.64+0x38] ; /* 0x0000380604067981 */
/* 0x001f62000c1e1b00 */
/*0490*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe20003f0e170 */
/*04a0*/ UIADD3 UR4, UR4, 0x8, URZ ; /* 0x0000000804047890 */
/* 0x000fe2000fffe03f */
/*04b0*/ IADD3 R26, R26, -0x8, RZ ; /* 0xfffffff81a1a7810 */
/* 0x000fe20007ffe0ff */
/*04c0*/ DADD R22, R10, R22 ; /* 0x000000000a167229 */
/* 0x006ecc0000000016 */
/*04d0*/ DADD R20, R22, R20 ; /* 0x0000000016147229 */
/* 0x008f0c0000000014 */
/*04e0*/ DADD R18, R20, R18 ; /* 0x0000000014127229 */
/* 0x010f4c0000000012 */
/*04f0*/ DADD R16, R18, R16 ; /* 0x0000000012107229 */
/* 0x020e0c0000000010 */
/*0500*/ DADD R14, R16, R14 ; /* 0x00000000100e7229 */
/* 0x001e0c000000000e */
/*0510*/ DADD R12, R14, R12 ; /* 0x000000000e0c7229 */
/* 0x001e0c000000000c */
/*0520*/ DADD R8, R12, R8 ; /* 0x000000000c087229 */
/* 0x0010640000000008 */
/*0530*/ IADD3 R12, P1, R4, 0x40, RZ ; /* 0x00000040040c7810 */
/* 0x001fc80007f3e0ff */
/*0540*/ DADD R10, R8, R6 ; /* 0x00000000080a7229 */
/* 0x0020620000000006 */
/*0550*/ IMAD.X R5, RZ, RZ, R5, P1 ; /* 0x000000ffff057224 */
/* 0x000fe400008e0605 */
/*0560*/ IMAD.MOV.U32 R4, RZ, RZ, R12 ; /* 0x000000ffff047224 */
/* 0x000fe400078e000c */
/*0570*/ ISETP.NE.OR P0, PT, R26, RZ, P0 ; /* 0x000000ff1a00720c */
/* 0x003fda0000705670 */
/*0580*/ @!P0 BRA 0x690 ; /* 0x0000010000008947 */
/* 0x000fea0003800000 */
/*0590*/ LDG.E.64 R6, [R4.64] ; /* 0x0000000604067981 */
/* 0x000ea8000c1e1b00 */
/*05a0*/ LDG.E.64 R8, [R4.64+0x8] ; /* 0x0000080604087981 */
/* 0x000ee8000c1e1b00 */
/*05b0*/ LDG.E.64 R12, [R4.64+0x10] ; /* 0x00001006040c7981 */
/* 0x000f28000c1e1b00 */
/*05c0*/ LDG.E.64 R14, [R4.64+0x18] ; /* 0x00001806040e7981 */
/* 0x000162000c1e1b00 */
/*05d0*/ IADD3 R26, R26, -0x4, RZ ; /* 0xfffffffc1a1a7810 */
/* 0x000fe20007ffe0ff */
/*05e0*/ UIADD3 UR4, UR4, 0x4, URZ ; /* 0x0000000404047890 */
/* 0x000fc6000fffe03f */
/*05f0*/ ISETP.NE.AND P0, PT, R26, RZ, PT ; /* 0x000000ff1a00720c */
/* 0x000fe20003f05270 */
/*0600*/ DADD R6, R6, R10 ; /* 0x0000000006067229 */
/* 0x004ecc000000000a */
/*0610*/ DADD R6, R6, R8 ; /* 0x0000000006067229 */
/* 0x0083240000000008 */
/*0620*/ IADD3 R8, P1, R4, 0x20, RZ ; /* 0x0000002004087810 */
/* 0x002fc80007f3e0ff */
/*0630*/ DADD R6, R6, R12 ; /* 0x0000000006067229 */
/* 0x010f62000000000c */
/*0640*/ IMAD.X R9, RZ, RZ, R5, P1 ; /* 0x000000ffff097224 */
/* 0x000fe400008e0605 */
/*0650*/ IMAD.MOV.U32 R4, RZ, RZ, R8 ; /* 0x000000ffff047224 */
/* 0x001fe400078e0008 */
/*0660*/ IMAD.MOV.U32 R5, RZ, RZ, R9 ; /* 0x000000ffff057224 */
/* 0x000fe200078e0009 */
/*0670*/ DADD R10, R6, R14 ; /* 0x00000000060a7229 */
/* 0x020064000000000e */
/*0680*/ @P0 BRA 0x590 ; /* 0xffffff0000000947 */
/* 0x003fea000383ffff */
/*0690*/ ISETP.NE.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x000fda0003f05270 */
/*06a0*/ @!P0 BRA 0x790 ; /* 0x000000e000008947 */
/* 0x000fea0003800000 */
/*06b0*/ IADD3 R4, R3, UR4, RZ ; /* 0x0000000403047c10 */
/* 0x000fe2000fffe0ff */
/*06c0*/ IMAD.MOV.U32 R5, RZ, RZ, 0x8 ; /* 0x00000008ff057424 */
/* 0x000fc800078e00ff */
/*06d0*/ IMAD.WIDE R4, R4, R5, c[0x0][0x168] ; /* 0x00005a0004047625 */
/* 0x000fc800078e0205 */
/*06e0*/ IMAD.MOV.U32 R3, RZ, RZ, R4 ; /* 0x000000ffff037224 */
/* 0x000fe400078e0004 */
/*06f0*/ IMAD.MOV.U32 R6, RZ, RZ, R5 ; /* 0x000000ffff067224 */
/* 0x000fe400078e0005 */
/*0700*/ IMAD.MOV.U32 R4, RZ, RZ, R3 ; /* 0x000000ffff047224 */
/* 0x001fe400078e0003 */
/*0710*/ IMAD.MOV.U32 R5, RZ, RZ, R6 ; /* 0x000000ffff057224 */
/* 0x000fcc00078e0006 */
/*0720*/ LDG.E.64 R4, [R4.64] ; /* 0x0000000604047981 */
/* 0x000ea2000c1e1b00 */
/*0730*/ IADD3 R2, R2, -0x1, RZ ; /* 0xffffffff02027810 */
/* 0x000fe40007ffe0ff */
/*0740*/ IADD3 R3, P1, R3, 0x8, RZ ; /* 0x0000000803037810 */
/* 0x000fe40007f3e0ff */
/*0750*/ ISETP.NE.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x000fc60003f05270 */
/*0760*/ IMAD.X R6, RZ, RZ, R6, P1 ; /* 0x000000ffff067224 */
/* 0x000fe200008e0606 */
/*0770*/ DADD R10, R4, R10 ; /* 0x00000000040a7229 */
/* 0x006052000000000a */
/*0780*/ @P0 BRA 0x700 ; /* 0xffffff7000000947 */
/* 0x000fea000383ffff */
/*0790*/ IMAD.MOV.U32 R3, RZ, RZ, 0x8 ; /* 0x00000008ff037424 */
/* 0x000fc800078e00ff */
/*07a0*/ IMAD.WIDE.U32 R2, R0, R3, c[0x0][0x170] ; /* 0x00005c0000027625 */
/* 0x001fca00078e0003 */
/*07b0*/ STG.E.64 [R2.64], R10 ; /* 0x0000000a02007986 */
/* 0x002fe2000c101b06 */
/*07c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*07d0*/ BRA 0x7d0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*07e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*07f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0800*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0810*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0820*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0830*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0840*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0850*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0860*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0870*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z19sumMatrixRowsKerneliPdS_
.globl _Z19sumMatrixRowsKerneliPdS_
.p2align 8
.type _Z19sumMatrixRowsKerneliPdS_,@function
_Z19sumMatrixRowsKerneliPdS_:
s_load_b32 s2, s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s2, 1
s_cbranch_scc1 .LBB0_3
s_load_b64 s[4:5], s[0:1], 0x8
v_mul_lo_u32 v1, v0, s2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v2, 31, v1
v_lshlrev_b64 v[3:4], 3, v[1:2]
v_mov_b32_e32 v1, 0
v_mov_b32_e32 v2, 0
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v3, vcc_lo, s4, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo
.LBB0_2:
global_load_b64 v[5:6], v[3:4], off
v_add_co_u32 v3, vcc_lo, v3, 8
v_add_co_ci_u32_e32 v4, vcc_lo, 0, v4, vcc_lo
s_add_i32 s2, s2, -1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_eq_u32 s2, 0
s_waitcnt vmcnt(0)
v_add_f64 v[1:2], v[1:2], v[5:6]
s_cbranch_scc0 .LBB0_2
s_branch .LBB0_4
.LBB0_3:
v_mov_b32_e32 v1, 0
v_mov_b32_e32 v2, 0
.LBB0_4:
s_load_b64 s[0:1], s[0:1], 0x10
v_lshlrev_b32_e32 v0, 3, v0
s_waitcnt lgkmcnt(0)
global_store_b64 v0, v[1:2], s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z19sumMatrixRowsKerneliPdS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 6
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z19sumMatrixRowsKerneliPdS_, .Lfunc_end0-_Z19sumMatrixRowsKerneliPdS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z19sumMatrixRowsKerneliPdS_
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z19sumMatrixRowsKerneliPdS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00155f73_00000000-6_sumMatrixRows.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2063:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2063:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z13random_vectorPdm
.type _Z13random_vectorPdm, @function
_Z13random_vectorPdm:
.LFB2059:
.cfi_startproc
endbr64
testq %rsi, %rsi
je .L8
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $8, %rsp
.cfi_def_cfa_offset 48
movq %rdi, %r13
movq %rsi, %r12
movl $0, %ebp
movl $0, %ebx
.L5:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2sdl %eax, %xmm0
divsd .LC0(%rip), %xmm0
movsd %xmm0, 0(%r13,%rbx,8)
addl $1, %ebp
movl %ebp, %ebx
cmpq %r12, %rbx
jb .L5
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L8:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
.cfi_restore 13
ret
.cfi_endproc
.LFE2059:
.size _Z13random_vectorPdm, .-_Z13random_vectorPdm
.globl _Z11array_zerosPdm
.type _Z11array_zerosPdm, @function
_Z11array_zerosPdm:
.LFB2060:
.cfi_startproc
endbr64
testq %rsi, %rsi
je .L11
movl $0, %edx
movl $0, %eax
.L13:
movq $0x000000000, (%rdi,%rax,8)
addl $1, %edx
movl %edx, %eax
cmpq %rsi, %rax
jb .L13
.L11:
ret
.cfi_endproc
.LFE2060:
.size _Z11array_zerosPdm, .-_Z11array_zerosPdm
.globl _Z42__device_stub__Z19sumMatrixRowsKerneliPdS_iPdS_
.type _Z42__device_stub__Z19sumMatrixRowsKerneliPdS_iPdS_, @function
_Z42__device_stub__Z19sumMatrixRowsKerneliPdS_iPdS_:
.LFB2085:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movl %edi, 28(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L19
.L15:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L20
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z19sumMatrixRowsKerneliPdS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L15
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z42__device_stub__Z19sumMatrixRowsKerneliPdS_iPdS_, .-_Z42__device_stub__Z19sumMatrixRowsKerneliPdS_iPdS_
.globl _Z19sumMatrixRowsKerneliPdS_
.type _Z19sumMatrixRowsKerneliPdS_, @function
_Z19sumMatrixRowsKerneliPdS_:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z42__device_stub__Z19sumMatrixRowsKerneliPdS_iPdS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z19sumMatrixRowsKerneliPdS_, .-_Z19sumMatrixRowsKerneliPdS_
.section .rodata.str1.1,"aMS",@progbits,1
.LC2:
.string "Checking CUDA Call... \n"
.LC3:
.string "Error checking CUDA Call... \n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC4:
.string "/home/ubuntu/Datasets/stackv2/train-structured/maxibove13/GPGPU/master/test_cuda/sumMatrixRows.cu"
.section .rodata.str1.1
.LC5:
.string "Error: %s:%d, "
.LC6:
.string "code: %d, reason: %s\n"
.section .rodata.str1.8
.align 8
.LC7:
.string "Call Checked with error, stopping...\n"
.section .rodata.str1.1
.LC8:
.string "Call Checked OK, region... \n"
.text
.globl _Z13sumMatrixRowsiiPdS_
.type _Z13sumMatrixRowsiiPdS_, @function
_Z13sumMatrixRowsiiPdS_:
.LFB2058:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $56, %rsp
.cfi_def_cfa_offset 112
movl %edi, %ebp
movl %esi, %r12d
movq %rdx, %r14
movq %rcx, %r13
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
leaq .LC2(%rip), %rsi
movl $2, %edi
call __printf_chk@PLT
movl $0, %edi
call cudaSetDevice@PLT
testl %eax, %eax
jne .L28
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %ebp, %ebx
imull %r12d, %ebx
sall $3, %ebx
movslq %ebx, %rbx
movq %rsp, %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leal 0(,%rbp,8), %r15d
movslq %r15d, %r15
leaq 8(%rsp), %rdi
movq %r15, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r14, %rsi
movq (%rsp), %rdi
call cudaMemcpy@PLT
movq %r15, %rdx
movl $0, %esi
movq 8(%rsp), %rdi
call cudaMemset@PLT
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl %ebp, 28(%rsp)
movl $1, 32(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L29
.L25:
movl $2, %ecx
movq %r15, %rdx
movq 8(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L30
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L28:
.cfi_restore_state
movl %eax, %ebx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $69, %ecx
leaq .LC4(%rip), %rdx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %ebx, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
movl %ebx, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L29:
movq 8(%rsp), %rdx
movq (%rsp), %rsi
movl %r12d, %edi
call _Z42__device_stub__Z19sumMatrixRowsKerneliPdS_iPdS_
jmp .L25
.L30:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size _Z13sumMatrixRowsiiPdS_, .-_Z13sumMatrixRowsiiPdS_
.section .rodata.str1.8
.align 8
.LC9:
.string "El programa recibe M la dimensi\303\263n de la matriz cuadrada aleatoria a sumar por filas"
.section .rodata.str1.1
.LC10:
.string "sum_h before: "
.LC11:
.string "%f"
.LC12:
.string "\n"
.LC13:
.string "sum_h after: "
.LC14:
.string "%f "
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $24, %rsp
.cfi_def_cfa_offset 80
movq %rsi, %rbx
cmpl $1, %edi
jle .L41
.L32:
movq 8(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %rbp
movl %eax, 12(%rsp)
movl %eax, %ebx
imull %eax, %ebx
movslq %ebx, %rbx
leaq 0(,%rbx,8), %rsi
movl $64, %edi
call aligned_alloc@PLT
movq %rax, %r15
movslq %ebp, %r12
leaq 0(,%r12,8), %rsi
movl $64, %edi
call aligned_alloc@PLT
movq %rax, %r14
movq %rbx, %rsi
movq %r15, %rdi
call _Z13random_vectorPdm
movq %r12, %rsi
movq %r14, %rdi
call _Z11array_zerosPdm
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
testl %ebp, %ebp
jle .L33
movq %r14, %rbx
leal -1(%rbp), %eax
leaq 8(%r14,%rax,8), %r12
movq %r14, %rbp
leaq .LC11(%rip), %r13
.L34:
movsd 0(%rbp), %xmm0
movq %r13, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $8, %rbp
cmpq %r12, %rbp
jne .L34
leaq .LC12(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %r14, %rcx
movq %r15, %rdx
movl 12(%rsp), %edi
movl %edi, %esi
call _Z13sumMatrixRowsiiPdS_
leaq .LC13(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC14(%rip), %rbp
.L35:
movsd (%rbx), %xmm0
movq %rbp, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $8, %rbx
cmpq %r12, %rbx
jne .L35
.L36:
leaq .LC12(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %eax
addq $24, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L41:
.cfi_restore_state
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L32
.L33:
leaq .LC12(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %r14, %rcx
movq %r15, %rdx
movl 12(%rsp), %edi
movl %edi, %esi
call _Z13sumMatrixRowsiiPdS_
leaq .LC13(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L36
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC15:
.string "_Z19sumMatrixRowsKerneliPdS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2088:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC15(%rip), %rdx
movq %rdx, %rcx
leaq _Z19sumMatrixRowsKerneliPdS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC0:
.long -4194304
.long 1105199103
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "sumMatrixRows.hip"
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI0_0:
.quad 0x41dfffffffc00000 # double 2147483647
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
pushq %rax
.cfi_def_cfa_offset 64
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %rbx
cmpl $1, %edi
jg .LBB0_2
# %bb.1:
movl $.L.str, %edi
xorl %eax, %eax
callq printf
.LBB0_2:
movq 8(%rbx), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r14
movl %r14d, %r12d
imull %r12d, %r12d
leaq (,%r12,8), %rsi
movl $64, %edi
callq aligned_alloc
movq %rax, %r15
movq %r14, %r13
shlq $32, %r13
movq %r13, %rsi
sarq $29, %rsi
movl $64, %edi
callq aligned_alloc
movq %rax, %rbx
testl %r12d, %r12d
je .LBB0_5
# %bb.3: # %.lr.ph.i.preheader
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB0_4: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
callq rand
xorps %xmm0, %xmm0
cvtsi2sd %eax, %xmm0
divsd .LCPI0_0(%rip), %xmm0
movsd %xmm0, (%r15,%rbp,8)
incq %rbp
cmpq %rbp, %r12
jne .LBB0_4
.LBB0_5: # %_Z13random_vectorPdm.exit
testq %r13, %r13
je .LBB0_8
# %bb.6: # %.lr.ph.i28.preheader
movslq %r14d, %rax
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB0_7: # %.lr.ph.i28
# =>This Inner Loop Header: Depth=1
movq $0, (%rbx,%rcx,8)
incq %rcx
movl %ecx, %edx
cmpq %rdx, %rax
ja .LBB0_7
.LBB0_8: # %_Z11array_zerosPdm.exit
movl $.L.str.1, %edi
xorl %eax, %eax
callq printf
testl %r14d, %r14d
jle .LBB0_11
# %bb.9: # %.lr.ph.preheader
movl %r14d, %r12d
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB0_10: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movsd (%rbx,%r13,8), %xmm0 # xmm0 = mem[0],zero
movl $.L.str.2, %edi
movb $1, %al
callq printf
incq %r13
cmpq %r13, %r12
jne .LBB0_10
.LBB0_11: # %._crit_edge
movl $10, %edi
callq putchar@PLT
movl %r14d, %edi
movl %r14d, %esi
movq %r15, %rdx
movq %rbx, %rcx
callq _Z13sumMatrixRowsiiPdS_
movl $.L.str.4, %edi
xorl %eax, %eax
callq printf
testl %r14d, %r14d
jle .LBB0_14
# %bb.12: # %.lr.ph33.preheader
movl %r14d, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB0_13: # %.lr.ph33
# =>This Inner Loop Header: Depth=1
movsd (%rbx,%r15,8), %xmm0 # xmm0 = mem[0],zero
movl $.L.str.5, %edi
movb $1, %al
callq printf
incq %r15
cmpq %r15, %r14
jne .LBB0_13
.LBB0_14: # %._crit_edge34
movl $10, %edi
callq putchar@PLT
xorl %eax, %eax
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z13random_vectorPdm
.LCPI1_0:
.quad 0x41dfffffffc00000 # double 2147483647
.text
.globl _Z13random_vectorPdm
.p2align 4, 0x90
.type _Z13random_vectorPdm,@function
_Z13random_vectorPdm: # @_Z13random_vectorPdm
.cfi_startproc
# %bb.0:
testq %rsi, %rsi
je .LBB1_4
# %bb.1: # %.lr.ph.preheader
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
pushq %rax
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %rbx
movq %rdi, %r14
movl $1, %ebp
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
xorps %xmm0, %xmm0
cvtsi2sd %eax, %xmm0
divsd .LCPI1_0(%rip), %xmm0
movsd %xmm0, (%r14,%r15,8)
movl %ebp, %r15d
incl %ebp
cmpq %rbx, %r15
jb .LBB1_2
# %bb.3:
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.cfi_restore %r15
.cfi_restore %rbp
.LBB1_4: # %._crit_edge
retq
.Lfunc_end1:
.size _Z13random_vectorPdm, .Lfunc_end1-_Z13random_vectorPdm
.cfi_endproc
# -- End function
.globl _Z11array_zerosPdm # -- Begin function _Z11array_zerosPdm
.p2align 4, 0x90
.type _Z11array_zerosPdm,@function
_Z11array_zerosPdm: # @_Z11array_zerosPdm
.cfi_startproc
# %bb.0:
testq %rsi, %rsi
je .LBB2_3
# %bb.1: # %.lr.ph.preheader
xorl %eax, %eax
.p2align 4, 0x90
.LBB2_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movq $0, (%rdi,%rax,8)
incq %rax
movl %eax, %ecx
cmpq %rsi, %rcx
jb .LBB2_2
.LBB2_3: # %._crit_edge
retq
.Lfunc_end2:
.size _Z11array_zerosPdm, .Lfunc_end2-_Z11array_zerosPdm
.cfi_endproc
# -- End function
.globl _Z13sumMatrixRowsiiPdS_ # -- Begin function _Z13sumMatrixRowsiiPdS_
.p2align 4, 0x90
.type _Z13sumMatrixRowsiiPdS_,@function
_Z13sumMatrixRowsiiPdS_: # @_Z13sumMatrixRowsiiPdS_
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $120, %rsp
.cfi_def_cfa_offset 176
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rcx, %rbx
movq %rdx, %r12
movl %esi, %ebp
movl %edi, %r15d
movl $.Lstr, %edi
callq puts@PLT
xorl %edi, %edi
callq hipSetDevice
testl %eax, %eax
jne .LBB3_4
# %bb.1:
movl $.Lstr.1, %edi
callq puts@PLT
leal (,%r15,8), %r14d
movl %r14d, %eax
imull %ebp, %eax
movslq %eax, %r13
leaq 16(%rsp), %rdi
movq %r13, %rsi
callq hipMalloc
movslq %r14d, %r14
leaq 8(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
movq 16(%rsp), %rdi
movq %r12, %rsi
movq %r13, %rdx
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
xorl %esi, %esi
movq %r14, %rdx
callq hipMemset
movl %r15d, %edx
movabsq $4294967296, %rdi # imm = 0x100000000
orq %rdi, %rdx
orq $1, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_3
# %bb.2:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movl %ebp, 28(%rsp)
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 88(%rsp), %rax
movq %rax, 104(%rsp)
leaq 80(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z19sumMatrixRowsKerneliPdS_, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_3:
movq 8(%rsp), %rsi
movq %rbx, %rdi
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
addq $120, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB3_4:
.cfi_def_cfa_offset 176
movl $.Lstr.2, %edi
movl %eax, %ebx
callq puts@PLT
movl $.L.str.8, %edi
movl $.L.str.9, %esi
movl $69, %edx
xorl %eax, %eax
callq printf
movl %ebx, %edi
callq hipGetErrorString
movl $.L.str.10, %edi
movl %ebx, %esi
movq %rax, %rdx
xorl %eax, %eax
callq printf
movl $.Lstr.3, %edi
callq puts@PLT
movl $1, %edi
callq exit
.Lfunc_end3:
.size _Z13sumMatrixRowsiiPdS_, .Lfunc_end3-_Z13sumMatrixRowsiiPdS_
.cfi_endproc
# -- End function
.globl _Z34__device_stub__sumMatrixRowsKerneliPdS_ # -- Begin function _Z34__device_stub__sumMatrixRowsKerneliPdS_
.p2align 4, 0x90
.type _Z34__device_stub__sumMatrixRowsKerneliPdS_,@function
_Z34__device_stub__sumMatrixRowsKerneliPdS_: # @_Z34__device_stub__sumMatrixRowsKerneliPdS_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movl %edi, 12(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 72(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z19sumMatrixRowsKerneliPdS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end4:
.size _Z34__device_stub__sumMatrixRowsKerneliPdS_, .Lfunc_end4-_Z34__device_stub__sumMatrixRowsKerneliPdS_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB5_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB5_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z19sumMatrixRowsKerneliPdS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end5:
.size __hip_module_ctor, .Lfunc_end5-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB6_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB6_2:
retq
.Lfunc_end6:
.size __hip_module_dtor, .Lfunc_end6-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "El programa recibe M la dimensi\303\263n de la matriz cuadrada aleatoria a sumar por filas"
.size .L.str, 85
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "sum_h before: "
.size .L.str.1, 15
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "%f"
.size .L.str.2, 3
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "sum_h after: "
.size .L.str.4, 14
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "%f "
.size .L.str.5, 4
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "Error: %s:%d, "
.size .L.str.8, 15
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/maxibove13/GPGPU/master/test_cuda/sumMatrixRows.hip"
.size .L.str.9, 109
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "code: %d, reason: %s\n"
.size .L.str.10, 22
.type _Z19sumMatrixRowsKerneliPdS_,@object # @_Z19sumMatrixRowsKerneliPdS_
.section .rodata,"a",@progbits
.globl _Z19sumMatrixRowsKerneliPdS_
.p2align 3, 0x0
_Z19sumMatrixRowsKerneliPdS_:
.quad _Z34__device_stub__sumMatrixRowsKerneliPdS_
.size _Z19sumMatrixRowsKerneliPdS_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z19sumMatrixRowsKerneliPdS_"
.size .L__unnamed_1, 29
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Checking CUDA Call... "
.size .Lstr, 23
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "Call Checked OK, region... "
.size .Lstr.1, 28
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "Error checking CUDA Call... "
.size .Lstr.2, 29
.type .Lstr.3,@object # @str.3
.Lstr.3:
.asciz "Call Checked with error, stopping..."
.size .Lstr.3, 37
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z34__device_stub__sumMatrixRowsKerneliPdS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z19sumMatrixRowsKerneliPdS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <cstdio>
int getThreadNum()
{
cudaDeviceProp prop;
int count;
cudaGetDeviceCount(&count);
printf("gpu num %d\n", count);
cudaGetDeviceProperties(&prop, 0);
printf("max thread num : %d\n", prop.maxThreadsPerBlock);
printf("grid dimensions : %d %d %d\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
return prop.maxThreadsPerBlock;
}
__global__ void conv (float* img, float* kernel, float* result, int width, int
height, int channel, int kernelSize)
{
int ti = threadIdx.x;
int bi = blockIdx.x;
// int id = (bi * threadNum + ti);
int id = (bi * blockDim.x + ti);
if (id >= width * height * channel) return;
int row = id / width;
int col = id % width;
for ( int k = 0;k < channel;++k ) {
for ( int i = 0;i < kernelSize;++i ) {
for ( int j = 0;j < kernelSize;++j ) {
float imgValue = 0.0;
int curRow = row - kernelSize >> 1 + i;
int curCol = col - kernelSize >> 1 + j;
if ( curRow < 0 || curCol < 0 || curRow >= height || curCol >= width) {
continue;
} else {
imgValue = img[curRow * width + curCol];
}
result[id] += kernel[k * kernelSize * kernelSize + i * kernelSize + j] * imgValue;
}
}
}
// for ( int i = 0;i < kernelSize; ++i ) {
// for ( int j = 0;j < kernelSize; ++j ) {
// float imgValue = 0.0;
// int curRow = row - kernelSize / 2 + i;
// int curCol = col - kernelSize / 2 + j;
// if (curRow < 0 || curCol < 0 || curRow >= height || curCol >= width) {
// continue;
// } else {
// imgValue = img[curRow * width + curCol];
// }
// result[id] += kernel[i * kernelSize + j] * imgValue;
// }
// }
}
int main(void)
{
int width = 1920;
int height = 1080;
int inChannel = 1;
int outChannel = 8;
float* img = new float[width * height];
for ( int row = 0;row < height;++row ) {
for (int col = 0;col < width;++col ) {
img[col + row * width] = (col + row) % 256;
}
}
int kernelSize = 5;
float* kernel = new float[outChannel * kernelSize * kernelSize];
for ( int i = 0;i < outChannel * kernelSize * kernelSize; ++i) {
kernel[i] = i % kernelSize - 1;
}
float* imgGpu;
float* kernelGpu;
float* resultGpu;
cudaMalloc((void**)&imgGpu, inChannel * width * height * sizeof(float));
cudaMalloc((void**)&kernelGpu, outChannel * kernelSize * kernelSize * sizeof(float));
cudaMalloc((void**)&resultGpu, outChannel * width * height * sizeof(float));
cudaMemcpy(imgGpu, img, inChannel * width * height * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(kernelGpu, kernel, outChannel * kernelSize * kernelSize * sizeof(float), cudaMemcpyHostToDevice);
int threadNum = getThreadNum();
int blockNum = (width * height - 0.5) / threadNum + 1;
conv<<<blockNum, threadNum>>>(imgGpu, kernelGpu, resultGpu, width, height,
outChannel, kernelSize);
float* result = new float[outChannel * width * height];
cudaMemcpy(result, resultGpu, outChannel * width * height * sizeof(float), cudaMemcpyDeviceToHost);
for ( int i = 0;i < 10;++i ) {
for ( int j = 0;j < 10;++j ) {
printf("%2.0f ", img[i * width + j]);
}
printf("\n");
}
printf("\n");
for ( int k = 0;k < 2;++k ) {
for ( int i = 0;i < kernelSize;++i ) {
for ( int j = 0;j < kernelSize;++j ) {
printf("%2.0f ", kernel[k * kernelSize * kernelSize + i * kernelSize + j]);
}
printf("\n");
}
printf("\n\n");
}
for ( int k = 0;k < 2;++k ){
for ( int i = 0;i < 10;++i ) {
for ( int j = 0;j < 10;++j ) {
printf("%2.0f ", result[k * height * width + i * width + j]);
}
printf("\n");
}
printf("\n\n");
}
return 0;
} | code for sm_80
Function : _Z4convPfS_S_iiii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R5, SR_CTAID.X ; /* 0x0000000000057919 */
/* 0x000e220000002500 */
/*0020*/ ULDC.64 UR4, c[0x0][0x178] ; /* 0x00005e0000047ab9 */
/* 0x000fe40000000a00 */
/*0030*/ UIMAD UR4, UR5, UR4, URZ ; /* 0x00000004050472a4 */
/* 0x000fe2000f8e023f */
/*0040*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e240000002100 */
/*0050*/ ULDC UR5, c[0x0][0x180] ; /* 0x0000600000057ab9 */
/* 0x000fe40000000800 */
/*0060*/ UIMAD UR4, UR4, UR5, URZ ; /* 0x00000005040472a4 */
/* 0x000fe2000f8e023f */
/*0070*/ IMAD R5, R5, c[0x0][0x0], R0 ; /* 0x0000000005057a24 */
/* 0x001fca00078e0200 */
/*0080*/ ISETP.GE.AND P0, PT, R5, UR4, PT ; /* 0x0000000405007c0c */
/* 0x000fda000bf06270 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ IMAD.MOV.U32 R6, RZ, RZ, 0x1 ; /* 0x00000001ff067424 */
/* 0x000fca00078e00ff */
/*00b0*/ ISETP.LE.AND P0, PT, R6, c[0x0][0x180], PT ; /* 0x0000600006007a0c */
/* 0x000fda0003f03270 */
/*00c0*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*00d0*/ IABS R7, c[0x0][0x178] ; /* 0x00005e0000077a13 */
/* 0x000fe40000000000 */
/*00e0*/ ISETP.LE.AND P0, PT, R6, c[0x0][0x184], PT ; /* 0x0000610006007a0c */
/* 0x000fe40003f03270 */
/*00f0*/ I2F.RP R0, R7 ; /* 0x0000000700007306 */
/* 0x000e300000209400 */
/*0100*/ MUFU.RCP R0, R0 ; /* 0x0000000000007308 */
/* 0x001e240000001000 */
/*0110*/ IADD3 R2, R0, 0xffffffe, RZ ; /* 0x0ffffffe00027810 */
/* 0x001fcc0007ffe0ff */
/*0120*/ F2I.FTZ.U32.TRUNC.NTZ R3, R2 ; /* 0x0000000200037305 */
/* 0x000064000021f000 */
/*0130*/ IMAD.MOV.U32 R2, RZ, RZ, RZ ; /* 0x000000ffff027224 */
/* 0x001fe400078e00ff */
/*0140*/ IMAD.MOV R4, RZ, RZ, -R3 ; /* 0x000000ffff047224 */
/* 0x002fc800078e0a03 */
/*0150*/ IMAD R9, R4, R7, RZ ; /* 0x0000000704097224 */
/* 0x000fe200078e02ff */
/*0160*/ IABS R4, R5 ; /* 0x0000000500047213 */
/* 0x000fc60000000000 */
/*0170*/ IMAD.HI.U32 R3, R3, R9, R2 ; /* 0x0000000903037227 */
/* 0x000fcc00078e0002 */
/*0180*/ IMAD.HI.U32 R3, R3, R4, RZ ; /* 0x0000000403037227 */
/* 0x000fc800078e00ff */
/*0190*/ IMAD.MOV R0, RZ, RZ, -R3 ; /* 0x000000ffff007224 */
/* 0x000fc800078e0a03 */
/*01a0*/ IMAD R0, R7, R0, R4 ; /* 0x0000000007007224 */
/* 0x000fca00078e0204 */
/*01b0*/ ISETP.GT.U32.AND P3, PT, R7, R0, PT ; /* 0x000000000700720c */
/* 0x000fda0003f64070 */
/*01c0*/ @!P3 IMAD.IADD R0, R0, 0x1, -R7 ; /* 0x000000010000b824 */
/* 0x000fe200078e0a07 */
/*01d0*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fec0003800000 */
/*01e0*/ ISETP.GE.U32.AND P2, PT, R0, R7, PT ; /* 0x000000070000720c */
/* 0x000fe20003f46070 */
/*01f0*/ IMAD.MOV.U32 R8, RZ, RZ, 0x4 ; /* 0x00000004ff087424 */
/* 0x000fe200078e00ff */
/*0200*/ LOP3.LUT R2, R5.reuse, c[0x0][0x178], RZ, 0x3c, !PT ; /* 0x00005e0005027a12 */
/* 0x040fe200078e3cff */
/*0210*/ IMAD.MOV.U32 R4, RZ, RZ, RZ ; /* 0x000000ffff047224 */
/* 0x000fe200078e00ff */
/*0220*/ ISETP.GE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fe20003f06270 */
/*0230*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe20000000a00 */
/*0240*/ ISETP.GE.AND P1, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x000fe40003f26270 */
/*0250*/ @!P3 IADD3 R3, R3, 0x1, RZ ; /* 0x000000010303b810 */
/* 0x000fe40007ffe0ff */
/*0260*/ ISETP.GT.U32.AND P3, PT, R7, R0, PT ; /* 0x000000000700720c */
/* 0x000fe20003f64070 */
/*0270*/ IMAD.IADD R7, R0, 0x1, -R7 ; /* 0x0000000100077824 */
/* 0x000fe200078e0a07 */
/*0280*/ LOP3.LUT R2, RZ, c[0x0][0x178], RZ, 0x33, !PT ; /* 0x00005e00ff027a12 */
/* 0x000fc400078e33ff */
/*0290*/ @P2 IADD3 R3, R3, 0x1, RZ ; /* 0x0000000103032810 */
/* 0x000fe40007ffe0ff */
/*02a0*/ SEL R7, R7, R0, !P3 ; /* 0x0000000007077207 */
/* 0x000fe20005800000 */
/*02b0*/ IMAD.MOV.U32 R0, RZ, RZ, c[0x0][0x184] ; /* 0x00006100ff007624 */
/* 0x000fe200078e00ff */
/*02c0*/ ISETP.NE.AND P2, PT, RZ, c[0x0][0x178], PT ; /* 0x00005e00ff007a0c */
/* 0x000fe20003f45270 */
/*02d0*/ @!P1 IMAD.MOV R3, RZ, RZ, -R3 ; /* 0x000000ffff039224 */
/* 0x000fe400078e0a03 */
/*02e0*/ @!P0 IMAD.MOV R7, RZ, RZ, -R7 ; /* 0x000000ffff078224 */
/* 0x000fe200078e0a07 */
/*02f0*/ LOP3.LUT R0, R0, 0x3, RZ, 0xc0, !PT ; /* 0x0000000300007812 */
/* 0x000fe400078ec0ff */
/*0300*/ SEL R6, R2, R3, !P2 ; /* 0x0000000302067207 */
/* 0x000fc40005000000 */
/*0310*/ SEL R7, R2, R7, !P2 ; /* 0x0000000702077207 */
/* 0x000fe20005000000 */
/*0320*/ IMAD.WIDE R2, R5, R8, c[0x0][0x170] ; /* 0x00005c0005027625 */
/* 0x000fe200078e0208 */
/*0330*/ IADD3 R16, R0, -c[0x0][0x184], RZ ; /* 0x8000610000107a10 */
/* 0x000fe40007ffe0ff */
/*0340*/ IADD3 R5, R7, -c[0x0][0x184], RZ ; /* 0x8000610007057a10 */
/* 0x000fe40007ffe0ff */
/*0350*/ IADD3 R6, R6, -c[0x0][0x184], RZ ; /* 0x8000610006067a10 */
/* 0x000fe40007ffe0ff */
/*0360*/ IMAD.MOV.U32 R13, RZ, RZ, RZ ; /* 0x000000ffff0d7224 */
/* 0x001fca00078e00ff */
/*0370*/ IMAD.MOV.U32 R7, RZ, RZ, c[0x0][0x184] ; /* 0x00006100ff077624 */
/* 0x001fe200078e00ff */
/*0380*/ IADD3 R11, R13, 0x1, RZ ; /* 0x000000010d0b7810 */
/* 0x000fe20007ffe0ff */
/*0390*/ IMAD R10, R4, c[0x0][0x184], R13 ; /* 0x00006100040a7a24 */
/* 0x000fe200078e020d */
/*03a0*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */
/* 0x000fe20008000000 */
/*03b0*/ ISETP.NE.AND P2, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fc40003f45270 */
/*03c0*/ IADD3 R7, R7, -0x1, RZ ; /* 0xffffffff07077810 */
/* 0x000fe20007ffe0ff */
/*03d0*/ IMAD R10, R10, c[0x0][0x184], RZ ; /* 0x000061000a0a7a24 */
/* 0x000fe200078e02ff */
/*03e0*/ ISETP.GE.AND P3, PT, R11, c[0x0][0x184], PT ; /* 0x000061000b007a0c */
/* 0x000fe40003f66270 */
/*03f0*/ ISETP.GE.U32.AND P0, PT, R7, 0x3, PT ; /* 0x000000030700780c */
/* 0x000fe40003f06070 */
/*0400*/ SHF.R.S32.HI R7, RZ, R11, R6 ; /* 0x0000000bff077219 */
/* 0x000fd60000011406 */
/*0410*/ @!P0 BRA 0x7f0 ; /* 0x000003d000008947 */
/* 0x000fea0003800000 */
/*0420*/ IMAD.MOV.U32 R9, RZ, RZ, 0x4 ; /* 0x00000004ff097424 */
/* 0x000fe200078e00ff */
/*0430*/ ISETP.GE.AND P4, PT, R7, c[0x0][0x17c], PT ; /* 0x00005f0007007a0c */
/* 0x000fe20003f86270 */
/*0440*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */
/* 0x000fe40008000000 */
/*0450*/ IMAD.WIDE R8, R10, R9, c[0x0][0x168] ; /* 0x00005a000a087625 */
/* 0x000fc800078e0209 */
/*0460*/ UIADD3 UR5, UR4, 0x1, URZ ; /* 0x0000000104057890 */
/* 0x000fcc000fffe03f */
/*0470*/ SHF.R.S32.HI R12, RZ, UR5, R5 ; /* 0x00000005ff0c7c19 */
/* 0x000fc80008011405 */
/*0480*/ LOP3.LUT R13, R12, R7, RZ, 0xfc, !PT ; /* 0x000000070c0d7212 */
/* 0x000fc800078efcff */
/*0490*/ ISETP.LT.OR P1, PT, R13, RZ, P4 ; /* 0x000000ff0d00720c */
/* 0x000fc80002721670 */
/*04a0*/ ISETP.GE.OR P1, PT, R12, c[0x0][0x178], P1 ; /* 0x00005e000c007a0c */
/* 0x000fda0000f26670 */
/*04b0*/ @!P1 IMAD.MOV.U32 R13, RZ, RZ, 0x4 ; /* 0x00000004ff0d9424 */
/* 0x000fe200078e00ff */
/*04c0*/ @!P1 LDG.E R14, [R8.64] ; /* 0x00000006080e9981 */
/* 0x000ea2000c1e1900 */
/*04d0*/ @!P1 IMAD R12, R7, c[0x0][0x178], R12 ; /* 0x00005e00070c9a24 */
/* 0x000fc600078e020c */
/*04e0*/ @!P1 LDG.E R15, [R2.64] ; /* 0x00000006020f9981 */
/* 0x000ea2000c1e1900 */
/*04f0*/ @!P1 IMAD.WIDE R12, R12, R13, c[0x0][0x160] ; /* 0x000058000c0c9625 */
/* 0x000fcc00078e020d */
/*0500*/ @!P1 LDG.E R13, [R12.64] ; /* 0x000000060c0d9981 */
/* 0x000ea2000c1e1900 */
/*0510*/ UIADD3 UR5, UR4, 0x2, URZ ; /* 0x0000000204057890 */
/* 0x000fcc000fffe03f */
/*0520*/ SHF.R.S32.HI R18, RZ, UR5, R5 ; /* 0x00000005ff127c19 */
/* 0x000fc80008011405 */
/*0530*/ LOP3.LUT R17, R18, R7, RZ, 0xfc, !PT ; /* 0x0000000712117212 */
/* 0x000fc800078efcff */
/*0540*/ ISETP.LT.OR P0, PT, R17, RZ, P4 ; /* 0x000000ff1100720c */
/* 0x000fc80002701670 */
/*0550*/ ISETP.GE.OR P0, PT, R18, c[0x0][0x178], P0 ; /* 0x00005e0012007a0c */
/* 0x000fda0000706670 */
/*0560*/ @!P0 IMAD.MOV.U32 R19, RZ, RZ, 0x4 ; /* 0x00000004ff138424 */
/* 0x000fe400078e00ff */
/*0570*/ @!P0 IMAD R18, R7, c[0x0][0x178], R18 ; /* 0x00005e0007128a24 */
/* 0x000fe400078e0212 */
/*0580*/ @!P1 FFMA R17, R14, R13, R15 ; /* 0x0000000d0e119223 */
/* 0x004fe4000000000f */
/*0590*/ @!P0 IMAD.WIDE R14, R18, R19, c[0x0][0x160] ; /* 0x00005800120e8625 */
/* 0x000fc600078e0213 */
/*05a0*/ @!P1 STG.E [R2.64], R17 ; /* 0x0000001102009986 */
/* 0x0001e8000c101906 */
/*05b0*/ @!P0 LDG.E R15, [R14.64] ; /* 0x000000060e0f8981 */
/* 0x000e28000c1e1900 */
/*05c0*/ @!P0 LDG.E R12, [R8.64+0x4] ; /* 0x00000406080c8981 */
/* 0x000e28000c1e1900 */
/*05d0*/ @!P0 LDG.E R18, [R2.64] ; /* 0x0000000602128981 */
/* 0x000e22000c1e1900 */
/*05e0*/ UIADD3 UR5, UR4, 0x3, URZ ; /* 0x0000000304057890 */
/* 0x000fcc000fffe03f */
/*05f0*/ SHF.R.S32.HI R20, RZ, UR5, R5 ; /* 0x00000005ff147c19 */
/* 0x000fc80008011405 */
/*0600*/ LOP3.LUT R13, R20, R7, RZ, 0xfc, !PT ; /* 0x00000007140d7212 */
/* 0x000fc800078efcff */
/*0610*/ ISETP.LT.OR P1, PT, R13, RZ, P4 ; /* 0x000000ff0d00720c */
/* 0x000fc80002721670 */
/*0620*/ ISETP.GE.OR P1, PT, R20, c[0x0][0x178], P1 ; /* 0x00005e0014007a0c */
/* 0x000fda0000f26670 */
/*0630*/ @!P1 IMAD.MOV.U32 R22, RZ, RZ, 0x4 ; /* 0x00000004ff169424 */
/* 0x000fe400078e00ff */
/*0640*/ @!P1 IMAD R13, R7, c[0x0][0x178], R20 ; /* 0x00005e00070d9a24 */
/* 0x000fe400078e0214 */
/*0650*/ @!P0 FFMA R17, R12, R15, R18 ; /* 0x0000000f0c118223 */
/* 0x001fe40000000012 */
/*0660*/ @!P1 IMAD.WIDE R12, R13, R22, c[0x0][0x160] ; /* 0x000058000d0c9625 */
/* 0x000fc600078e0216 */
/*0670*/ @!P0 STG.E [R2.64], R17 ; /* 0x0000001102008986 */
/* 0x0001e8000c101906 */
/*0680*/ @!P1 LDG.E R13, [R12.64] ; /* 0x000000060c0d9981 */
/* 0x000ea8000c1e1900 */
/*0690*/ @!P1 LDG.E R14, [R8.64+0x8] ; /* 0x00000806080e9981 */
/* 0x000ea8000c1e1900 */
/*06a0*/ @!P1 LDG.E R19, [R2.64] ; /* 0x0000000602139981 */
/* 0x000ea2000c1e1900 */
/*06b0*/ UIADD3 UR4, UR4, 0x4, URZ ; /* 0x0000000404047890 */
/* 0x000fcc000fffe03f */
/*06c0*/ SHF.R.S32.HI R18, RZ, UR4, R5 ; /* 0x00000004ff127c19 */
/* 0x000fc80008011405 */
/*06d0*/ LOP3.LUT R15, R18, R7, RZ, 0xfc, !PT ; /* 0x00000007120f7212 */
/* 0x000fc800078efcff */
/*06e0*/ ISETP.LT.OR P0, PT, R15, RZ, P4 ; /* 0x000000ff0f00720c */
/* 0x000fc80002701670 */
/*06f0*/ ISETP.GE.OR P0, PT, R18, c[0x0][0x178], P0 ; /* 0x00005e0012007a0c */
/* 0x000fda0000706670 */
/*0700*/ @!P0 IMAD.MOV.U32 R21, RZ, RZ, 0x4 ; /* 0x00000004ff158424 */
/* 0x000fe400078e00ff */
/*0710*/ @!P0 IMAD R18, R7, c[0x0][0x178], R18 ; /* 0x00005e0007128a24 */
/* 0x000fe400078e0212 */
/*0720*/ @!P1 FFMA R15, R14, R13, R19 ; /* 0x0000000d0e0f9223 */
/* 0x004fe40000000013 */
/*0730*/ @!P0 IMAD.WIDE R12, R18, R21, c[0x0][0x160] ; /* 0x00005800120c8625 */
/* 0x000fc600078e0215 */
/*0740*/ @!P1 STG.E [R2.64], R15 ; /* 0x0000000f02009986 */
/* 0x0003e8000c101906 */
/*0750*/ @!P0 LDG.E R13, [R12.64] ; /* 0x000000060c0d8981 */
/* 0x000ea8000c1e1900 */
/*0760*/ @!P0 LDG.E R14, [R8.64+0xc] ; /* 0x00000c06080e8981 */
/* 0x0006a8000c1e1900 */
/*0770*/ @!P0 LDG.E R17, [R2.64] ; /* 0x0000000602118981 */
/* 0x001ea2000c1e1900 */
/*0780*/ IADD3 R8, P1, R8, 0x10, RZ ; /* 0x0000001008087810 */
/* 0x008fca0007f3e0ff */
/*0790*/ IMAD.X R9, RZ, RZ, R9, P1 ; /* 0x000000ffff097224 */
/* 0x000fe400008e0609 */
/*07a0*/ @!P0 FFMA R17, R14, R13, R17 ; /* 0x0000000d0e118223 */
/* 0x004fe20000000011 */
/*07b0*/ IADD3 R14, R16, UR4, RZ ; /* 0x00000004100e7c10 */
/* 0x000fc8000fffe0ff */
/*07c0*/ @!P0 STG.E [R2.64], R17 ; /* 0x0000001102008986 */
/* 0x0003e2000c101906 */
/*07d0*/ ISETP.NE.AND P0, PT, R14, RZ, PT ; /* 0x000000ff0e00720c */
/* 0x000fda0003f05270 */
/*07e0*/ @P0 BRA 0x460 ; /* 0xfffffc7000000947 */
/* 0x002fea000383ffff */
/*07f0*/ IMAD.MOV.U32 R13, RZ, RZ, R11 ; /* 0x000000ffff0d7224 */
/* 0x000fe200078e000b */
/*0800*/ @!P2 BRA 0xb60 ; /* 0x000003500000a947 */
/* 0x000fea0003800000 */
/*0810*/ UIADD3 UR5, UR4, 0x1, URZ ; /* 0x0000000104057890 */
/* 0x000fe2000fffe03f */
/*0820*/ ISETP.GE.AND P0, PT, R7, c[0x0][0x17c], PT ; /* 0x00005f0007007a0c */
/* 0x000fe20003f06270 */
/*0830*/ BSSY B0, 0x950 ; /* 0x0000011000007945 */
/* 0x000fe20003800000 */
/*0840*/ IADD3 R9, R10, UR4, RZ ; /* 0x000000040a097c10 */
/* 0x000fe2000fffe0ff */
/*0850*/ IMAD.MOV.U32 R10, RZ, RZ, 0x4 ; /* 0x00000004ff0a7424 */
/* 0x000fe200078e00ff */
/*0860*/ ISETP.NE.AND P2, PT, R0, 0x1, PT ; /* 0x000000010000780c */
/* 0x000fe40003f45270 */
/*0870*/ SHF.R.S32.HI R12, RZ, UR5, R5 ; /* 0x00000005ff0c7c19 */
/* 0x000fc80008011405 */
/*0880*/ LOP3.LUT R8, R12, R7, RZ, 0xfc, !PT ; /* 0x000000070c087212 */
/* 0x000fc800078efcff */
/*0890*/ ISETP.LT.OR P1, PT, R8, RZ, P0 ; /* 0x000000ff0800720c */
/* 0x000fe20000721670 */
/*08a0*/ IMAD.WIDE R8, R9, R10, c[0x0][0x168] ; /* 0x00005a0009087625 */
/* 0x000fc600078e020a */
/*08b0*/ ISETP.GE.OR P1, PT, R12, c[0x0][0x178], P1 ; /* 0x00005e000c007a0c */
/* 0x000fda0000f26670 */
/*08c0*/ @P1 BRA 0x940 ; /* 0x0000007000001947 */
/* 0x000fea0003800000 */
/*08d0*/ IMAD R15, R7, c[0x0][0x178], R12 ; /* 0x00005e00070f7a24 */
/* 0x000fe200078e020c */
/*08e0*/ LDG.E R11, [R8.64] ; /* 0x00000006080b7981 */
/* 0x000ea6000c1e1900 */
/*08f0*/ IMAD.WIDE R14, R15, R10, c[0x0][0x160] ; /* 0x000058000f0e7625 */
/* 0x000fe200078e020a */
/*0900*/ LDG.E R12, [R2.64] ; /* 0x00000006020c7981 */
/* 0x000eaa000c1e1900 */
/*0910*/ LDG.E R14, [R14.64] ; /* 0x000000060e0e7981 */
/* 0x000ea4000c1e1900 */
/*0920*/ FFMA R11, R11, R14, R12 ; /* 0x0000000e0b0b7223 */
/* 0x004fca000000000c */
/*0930*/ STG.E [R2.64], R11 ; /* 0x0000000b02007986 */
/* 0x0001e4000c101906 */
/*0940*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0950*/ @!P2 BRA 0xb60 ; /* 0x000002000000a947 */
/* 0x000fea0003800000 */
/*0960*/ UIADD3 UR5, UR4, 0x2, URZ ; /* 0x0000000204057890 */
/* 0x000fe2000fffe03f */
/*0970*/ BSSY B0, 0xa60 ; /* 0x000000e000007945 */
/* 0x000fe20003800000 */
/*0980*/ ISETP.NE.AND P2, PT, R0, 0x2, PT ; /* 0x000000020000780c */
/* 0x000fc80003f45270 */
/*0990*/ SHF.R.S32.HI R12, RZ, UR5, R5 ; /* 0x00000005ff0c7c19 */
/* 0x000fc80008011405 */
/*09a0*/ LOP3.LUT R11, R12, R7, RZ, 0xfc, !PT ; /* 0x000000070c0b7212 */
/* 0x001fc800078efcff */
/*09b0*/ ISETP.LT.OR P1, PT, R11, RZ, P0 ; /* 0x000000ff0b00720c */
/* 0x000fc80000721670 */
/*09c0*/ ISETP.GE.OR P1, PT, R12, c[0x0][0x178], P1 ; /* 0x00005e000c007a0c */
/* 0x000fda0000f26670 */
/*09d0*/ @P1 BRA 0xa50 ; /* 0x0000007000001947 */
/* 0x000fea0003800000 */
/*09e0*/ IMAD R15, R7, c[0x0][0x178], R12 ; /* 0x00005e00070f7a24 */
/* 0x000fe200078e020c */
/*09f0*/ LDG.E R11, [R8.64+0x4] ; /* 0x00000406080b7981 */
/* 0x000ea6000c1e1900 */
/*0a00*/ IMAD.WIDE R14, R15, R10, c[0x0][0x160] ; /* 0x000058000f0e7625 */
/* 0x000fe200078e020a */
/*0a10*/ LDG.E R12, [R2.64] ; /* 0x00000006020c7981 */
/* 0x000eaa000c1e1900 */
/*0a20*/ LDG.E R14, [R14.64] ; /* 0x000000060e0e7981 */
/* 0x000ea4000c1e1900 */
/*0a30*/ FFMA R11, R11, R14, R12 ; /* 0x0000000e0b0b7223 */
/* 0x004fca000000000c */
/*0a40*/ STG.E [R2.64], R11 ; /* 0x0000000b02007986 */
/* 0x0001e4000c101906 */
/*0a50*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0a60*/ @!P2 BRA 0xb60 ; /* 0x000000f00000a947 */
/* 0x000fea0003800000 */
/*0a70*/ UIADD3 UR4, UR4, 0x3, URZ ; /* 0x0000000304047890 */
/* 0x000fe2000fffe03f */
/*0a80*/ BSSY B0, 0xb60 ; /* 0x000000d000007945 */
/* 0x000fea0003800000 */
/*0a90*/ SHF.R.S32.HI R12, RZ, UR4, R5 ; /* 0x00000004ff0c7c19 */
/* 0x000fc80008011405 */
/*0aa0*/ LOP3.LUT R11, R12, R7, RZ, 0xfc, !PT ; /* 0x000000070c0b7212 */
/* 0x001fc800078efcff */
/*0ab0*/ ISETP.LT.OR P0, PT, R11, RZ, P0 ; /* 0x000000ff0b00720c */
/* 0x000fc80000701670 */
/*0ac0*/ ISETP.GE.OR P0, PT, R12, c[0x0][0x178], P0 ; /* 0x00005e000c007a0c */
/* 0x000fda0000706670 */
/*0ad0*/ @P0 BRA 0xb50 ; /* 0x0000007000000947 */
/* 0x000fea0003800000 */
/*0ae0*/ IMAD R11, R7, c[0x0][0x178], R12 ; /* 0x00005e00070b7a24 */
/* 0x000fe200078e020c */
/*0af0*/ LDG.E R8, [R8.64+0x8] ; /* 0x0000080608087981 */
/* 0x000ea6000c1e1900 */
/*0b00*/ IMAD.WIDE R10, R11, R10, c[0x0][0x160] ; /* 0x000058000b0a7625 */
/* 0x000fe200078e020a */
/*0b10*/ LDG.E R7, [R2.64] ; /* 0x0000000602077981 */
/* 0x000eaa000c1e1900 */
/*0b20*/ LDG.E R11, [R10.64] ; /* 0x000000060a0b7981 */
/* 0x000ea4000c1e1900 */
/*0b30*/ FFMA R7, R8, R11, R7 ; /* 0x0000000b08077223 */
/* 0x004fca0000000007 */
/*0b40*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x0001e4000c101906 */
/*0b50*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0b60*/ @!P3 BRA 0x370 ; /* 0xfffff8000000b947 */
/* 0x000fea000383ffff */
/*0b70*/ IADD3 R4, R4, 0x1, RZ ; /* 0x0000000104047810 */
/* 0x000fc80007ffe0ff */
/*0b80*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x180], PT ; /* 0x0000600004007a0c */
/* 0x000fda0003f06270 */
/*0b90*/ @!P0 BRA 0x360 ; /* 0xfffff7c000008947 */
/* 0x000fea000383ffff */
/*0ba0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0bb0*/ BRA 0xbb0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0bc0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0bd0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0be0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0bf0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c00*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c10*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c20*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c30*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c40*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <cstdio>
int getThreadNum()
{
cudaDeviceProp prop;
int count;
cudaGetDeviceCount(&count);
printf("gpu num %d\n", count);
cudaGetDeviceProperties(&prop, 0);
printf("max thread num : %d\n", prop.maxThreadsPerBlock);
printf("grid dimensions : %d %d %d\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
return prop.maxThreadsPerBlock;
}
__global__ void conv (float* img, float* kernel, float* result, int width, int
height, int channel, int kernelSize)
{
int ti = threadIdx.x;
int bi = blockIdx.x;
// int id = (bi * threadNum + ti);
int id = (bi * blockDim.x + ti);
if (id >= width * height * channel) return;
int row = id / width;
int col = id % width;
for ( int k = 0;k < channel;++k ) {
for ( int i = 0;i < kernelSize;++i ) {
for ( int j = 0;j < kernelSize;++j ) {
float imgValue = 0.0;
int curRow = row - kernelSize >> 1 + i;
int curCol = col - kernelSize >> 1 + j;
if ( curRow < 0 || curCol < 0 || curRow >= height || curCol >= width) {
continue;
} else {
imgValue = img[curRow * width + curCol];
}
result[id] += kernel[k * kernelSize * kernelSize + i * kernelSize + j] * imgValue;
}
}
}
// for ( int i = 0;i < kernelSize; ++i ) {
// for ( int j = 0;j < kernelSize; ++j ) {
// float imgValue = 0.0;
// int curRow = row - kernelSize / 2 + i;
// int curCol = col - kernelSize / 2 + j;
// if (curRow < 0 || curCol < 0 || curRow >= height || curCol >= width) {
// continue;
// } else {
// imgValue = img[curRow * width + curCol];
// }
// result[id] += kernel[i * kernelSize + j] * imgValue;
// }
// }
}
int main(void)
{
int width = 1920;
int height = 1080;
int inChannel = 1;
int outChannel = 8;
float* img = new float[width * height];
for ( int row = 0;row < height;++row ) {
for (int col = 0;col < width;++col ) {
img[col + row * width] = (col + row) % 256;
}
}
int kernelSize = 5;
float* kernel = new float[outChannel * kernelSize * kernelSize];
for ( int i = 0;i < outChannel * kernelSize * kernelSize; ++i) {
kernel[i] = i % kernelSize - 1;
}
float* imgGpu;
float* kernelGpu;
float* resultGpu;
cudaMalloc((void**)&imgGpu, inChannel * width * height * sizeof(float));
cudaMalloc((void**)&kernelGpu, outChannel * kernelSize * kernelSize * sizeof(float));
cudaMalloc((void**)&resultGpu, outChannel * width * height * sizeof(float));
cudaMemcpy(imgGpu, img, inChannel * width * height * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(kernelGpu, kernel, outChannel * kernelSize * kernelSize * sizeof(float), cudaMemcpyHostToDevice);
int threadNum = getThreadNum();
int blockNum = (width * height - 0.5) / threadNum + 1;
conv<<<blockNum, threadNum>>>(imgGpu, kernelGpu, resultGpu, width, height,
outChannel, kernelSize);
float* result = new float[outChannel * width * height];
cudaMemcpy(result, resultGpu, outChannel * width * height * sizeof(float), cudaMemcpyDeviceToHost);
for ( int i = 0;i < 10;++i ) {
for ( int j = 0;j < 10;++j ) {
printf("%2.0f ", img[i * width + j]);
}
printf("\n");
}
printf("\n");
for ( int k = 0;k < 2;++k ) {
for ( int i = 0;i < kernelSize;++i ) {
for ( int j = 0;j < kernelSize;++j ) {
printf("%2.0f ", kernel[k * kernelSize * kernelSize + i * kernelSize + j]);
}
printf("\n");
}
printf("\n\n");
}
for ( int k = 0;k < 2;++k ){
for ( int i = 0;i < 10;++i ) {
for ( int j = 0;j < 10;++j ) {
printf("%2.0f ", result[k * height * width + i * width + j]);
}
printf("\n");
}
printf("\n\n");
}
return 0;
} | .file "tmpxft_0001e30a_00000000-6_main_8155.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "gpu num %d\n"
.LC1:
.string "max thread num : %d\n"
.LC2:
.string "grid dimensions : %d %d %d\n"
.text
.globl _Z12getThreadNumv
.type _Z12getThreadNumv, @function
_Z12getThreadNumv:
.LFB2057:
.cfi_startproc
endbr64
subq $1064, %rsp
.cfi_def_cfa_offset 1072
movq %fs:40, %rax
movq %rax, 1048(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rdi
call cudaGetDeviceCount@PLT
movl 12(%rsp), %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 16(%rsp), %rdi
movl $0, %esi
call cudaGetDeviceProperties_v2@PLT
movl 336(%rsp), %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 360(%rsp), %r8d
movl 356(%rsp), %ecx
movl 352(%rsp), %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 336(%rsp), %eax
movq 1048(%rsp), %rdx
subq %fs:40, %rdx
jne .L6
addq $1064, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L6:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z12getThreadNumv, .-_Z12getThreadNumv
.globl _Z31__device_stub__Z4convPfS_S_iiiiPfS_S_iiii
.type _Z31__device_stub__Z4convPfS_S_iiiiPfS_S_iiii, @function
_Z31__device_stub__Z4convPfS_S_iiiiPfS_S_iiii:
.LFB2083:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
leaq 192(%rsp), %rax
movq %rax, 160(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L11
.L7:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L12
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z4convPfS_S_iiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L7
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z31__device_stub__Z4convPfS_S_iiiiPfS_S_iiii, .-_Z31__device_stub__Z4convPfS_S_iiiiPfS_S_iiii
.globl _Z4convPfS_S_iiii
.type _Z4convPfS_S_iiii, @function
_Z4convPfS_S_iiii:
.LFB2084:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z31__device_stub__Z4convPfS_S_iiiiPfS_S_iiii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z4convPfS_S_iiii, .-_Z4convPfS_S_iiii
.section .rodata.str1.1
.LC5:
.string "%2.0f "
.LC6:
.string "\n"
.LC7:
.string "\n\n"
.text
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $88, %rsp
.cfi_def_cfa_offset 144
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $8294400, %edi
call _Znam@PLT
movq %rax, %r14
movq %rax, %r8
movl $1920, %edi
.L16:
leal -1920(%rdi), %edx
movq %r8, %rsi
.L17:
movl %edx, %ecx
sarl $31, %ecx
shrl $24, %ecx
leal (%rdx,%rcx), %eax
movzbl %al, %eax
subl %ecx, %eax
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, (%rsi)
addl $1, %edx
addq $4, %rsi
cmpl %edi, %edx
jne .L17
addq $7680, %r8
addl $1, %edi
cmpl $3000, %edi
jne .L16
movl $800, %edi
call _Znam@PLT
movq %rax, %rbp
movl $0, %edx
.L19:
movslq %edx, %rax
imulq $1717986919, %rax, %rax
sarq $33, %rax
movl %edx, %ecx
sarl $31, %ecx
subl %ecx, %eax
leal (%rax,%rax,4), %ecx
movl %edx, %eax
subl %ecx, %eax
subl $1, %eax
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, 0(%rbp,%rdx,4)
addq $1, %rdx
cmpq $200, %rdx
jne .L19
leaq 24(%rsp), %rdi
movl $8294400, %esi
call cudaMalloc@PLT
leaq 32(%rsp), %rdi
movl $800, %esi
call cudaMalloc@PLT
leaq 40(%rsp), %rdi
movl $66355200, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $8294400, %edx
movq %r14, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $800, %edx
movq %rbp, %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
call _Z12getThreadNumv
movl %eax, 60(%rsp)
movl $1, 64(%rsp)
pxor %xmm1, %xmm1
cvtsi2sdl %eax, %xmm1
movsd .LC3(%rip), %xmm0
divsd %xmm1, %xmm0
addsd .LC4(%rip), %xmm0
cvttsd2sil %xmm0, %eax
movl %eax, 48(%rsp)
movl $1, 52(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L41
.L20:
movl $66355200, %edi
call _Znam@PLT
movq %rax, 8(%rsp)
movl $2, %ecx
movl $66355200, %edx
movq 40(%rsp), %rsi
movq %rax, %rdi
call cudaMemcpy@PLT
leaq 40(%r14), %r12
addq $76840, %r14
leaq .LC5(%rip), %r13
leaq .LC6(%rip), %r15
.L21:
leaq -40(%r12), %rbx
.L22:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %r13, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %r12, %rbx
jne .L22
movq %r15, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $7680, %r12
cmpq %r14, %r12
jne .L21
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $20, %rbp
movl $25, %r14d
leaq .LC5(%rip), %r13
jmp .L24
.L41:
subq $8, %rsp
.cfi_def_cfa_offset 152
pushq $5
.cfi_def_cfa_offset 160
movl $8, %r9d
movl $1080, %r8d
movl $1920, %ecx
movq 56(%rsp), %rdx
movq 48(%rsp), %rsi
movq 40(%rsp), %rdi
call _Z31__device_stub__Z4convPfS_S_iiiiPfS_S_iiii
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L20
.L42:
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $5, %r15d
addq $20, %r12
cmpl %r14d, %r15d
je .L26
.L28:
leaq -20(%r12), %rbx
.L25:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %r13, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %r12, %rbx
jne .L25
jmp .L42
.L26:
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $25, %r14d
addq $100, %rbp
cmpl $75, %r14d
je .L43
.L24:
leal -25(%r14), %r15d
movq %rbp, %r12
jmp .L28
.L43:
movq 8(%rsp), %rax
addq $40, %rax
movq %rax, 8(%rsp)
movl $0, %r14d
leaq .LC5(%rip), %r12
jmp .L27
.L44:
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1920, %r13d
addq $7680, %rbp
cmpl %r15d, %r13d
je .L30
.L32:
leaq -40(%rbp), %rbx
.L29:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %r12, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %rbp, %rbx
jne .L29
jmp .L44
.L30:
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $2073600, %r14d
addq $8294400, 8(%rsp)
cmpl $4147200, %r14d
je .L31
.L27:
movq 8(%rsp), %rbp
movl %r14d, %r13d
leal 19200(%r14), %r15d
jmp .L32
.L31:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L45
movl $0, %eax
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L45:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size main, .-main
.section .rodata.str1.1
.LC8:
.string "_Z4convPfS_S_iiii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _Z4convPfS_S_iiii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC3:
.long -2147483648
.long 1094689791
.align 8
.LC4:
.long 0
.long 1072693248
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <cstdio>
int getThreadNum()
{
cudaDeviceProp prop;
int count;
cudaGetDeviceCount(&count);
printf("gpu num %d\n", count);
cudaGetDeviceProperties(&prop, 0);
printf("max thread num : %d\n", prop.maxThreadsPerBlock);
printf("grid dimensions : %d %d %d\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
return prop.maxThreadsPerBlock;
}
__global__ void conv (float* img, float* kernel, float* result, int width, int
height, int channel, int kernelSize)
{
int ti = threadIdx.x;
int bi = blockIdx.x;
// int id = (bi * threadNum + ti);
int id = (bi * blockDim.x + ti);
if (id >= width * height * channel) return;
int row = id / width;
int col = id % width;
for ( int k = 0;k < channel;++k ) {
for ( int i = 0;i < kernelSize;++i ) {
for ( int j = 0;j < kernelSize;++j ) {
float imgValue = 0.0;
int curRow = row - kernelSize >> 1 + i;
int curCol = col - kernelSize >> 1 + j;
if ( curRow < 0 || curCol < 0 || curRow >= height || curCol >= width) {
continue;
} else {
imgValue = img[curRow * width + curCol];
}
result[id] += kernel[k * kernelSize * kernelSize + i * kernelSize + j] * imgValue;
}
}
}
// for ( int i = 0;i < kernelSize; ++i ) {
// for ( int j = 0;j < kernelSize; ++j ) {
// float imgValue = 0.0;
// int curRow = row - kernelSize / 2 + i;
// int curCol = col - kernelSize / 2 + j;
// if (curRow < 0 || curCol < 0 || curRow >= height || curCol >= width) {
// continue;
// } else {
// imgValue = img[curRow * width + curCol];
// }
// result[id] += kernel[i * kernelSize + j] * imgValue;
// }
// }
}
int main(void)
{
int width = 1920;
int height = 1080;
int inChannel = 1;
int outChannel = 8;
float* img = new float[width * height];
for ( int row = 0;row < height;++row ) {
for (int col = 0;col < width;++col ) {
img[col + row * width] = (col + row) % 256;
}
}
int kernelSize = 5;
float* kernel = new float[outChannel * kernelSize * kernelSize];
for ( int i = 0;i < outChannel * kernelSize * kernelSize; ++i) {
kernel[i] = i % kernelSize - 1;
}
float* imgGpu;
float* kernelGpu;
float* resultGpu;
cudaMalloc((void**)&imgGpu, inChannel * width * height * sizeof(float));
cudaMalloc((void**)&kernelGpu, outChannel * kernelSize * kernelSize * sizeof(float));
cudaMalloc((void**)&resultGpu, outChannel * width * height * sizeof(float));
cudaMemcpy(imgGpu, img, inChannel * width * height * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(kernelGpu, kernel, outChannel * kernelSize * kernelSize * sizeof(float), cudaMemcpyHostToDevice);
int threadNum = getThreadNum();
int blockNum = (width * height - 0.5) / threadNum + 1;
conv<<<blockNum, threadNum>>>(imgGpu, kernelGpu, resultGpu, width, height,
outChannel, kernelSize);
float* result = new float[outChannel * width * height];
cudaMemcpy(result, resultGpu, outChannel * width * height * sizeof(float), cudaMemcpyDeviceToHost);
for ( int i = 0;i < 10;++i ) {
for ( int j = 0;j < 10;++j ) {
printf("%2.0f ", img[i * width + j]);
}
printf("\n");
}
printf("\n");
for ( int k = 0;k < 2;++k ) {
for ( int i = 0;i < kernelSize;++i ) {
for ( int j = 0;j < kernelSize;++j ) {
printf("%2.0f ", kernel[k * kernelSize * kernelSize + i * kernelSize + j]);
}
printf("\n");
}
printf("\n\n");
}
for ( int k = 0;k < 2;++k ){
for ( int i = 0;i < 10;++i ) {
for ( int j = 0;j < 10;++j ) {
printf("%2.0f ", result[k * height * width + i * width + j]);
}
printf("\n");
}
printf("\n\n");
}
return 0;
} | #include <hip/hip_runtime.h>
#include <cstdio>
int getThreadNum()
{
hipDeviceProp_t prop;
int count;
hipGetDeviceCount(&count);
printf("gpu num %d\n", count);
hipGetDeviceProperties(&prop, 0);
printf("max thread num : %d\n", prop.maxThreadsPerBlock);
printf("grid dimensions : %d %d %d\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
return prop.maxThreadsPerBlock;
}
__global__ void conv (float* img, float* kernel, float* result, int width, int
height, int channel, int kernelSize)
{
int ti = threadIdx.x;
int bi = blockIdx.x;
// int id = (bi * threadNum + ti);
int id = (bi * blockDim.x + ti);
if (id >= width * height * channel) return;
int row = id / width;
int col = id % width;
for ( int k = 0;k < channel;++k ) {
for ( int i = 0;i < kernelSize;++i ) {
for ( int j = 0;j < kernelSize;++j ) {
float imgValue = 0.0;
int curRow = row - kernelSize >> 1 + i;
int curCol = col - kernelSize >> 1 + j;
if ( curRow < 0 || curCol < 0 || curRow >= height || curCol >= width) {
continue;
} else {
imgValue = img[curRow * width + curCol];
}
result[id] += kernel[k * kernelSize * kernelSize + i * kernelSize + j] * imgValue;
}
}
}
// for ( int i = 0;i < kernelSize; ++i ) {
// for ( int j = 0;j < kernelSize; ++j ) {
// float imgValue = 0.0;
// int curRow = row - kernelSize / 2 + i;
// int curCol = col - kernelSize / 2 + j;
// if (curRow < 0 || curCol < 0 || curRow >= height || curCol >= width) {
// continue;
// } else {
// imgValue = img[curRow * width + curCol];
// }
// result[id] += kernel[i * kernelSize + j] * imgValue;
// }
// }
}
int main(void)
{
int width = 1920;
int height = 1080;
int inChannel = 1;
int outChannel = 8;
float* img = new float[width * height];
for ( int row = 0;row < height;++row ) {
for (int col = 0;col < width;++col ) {
img[col + row * width] = (col + row) % 256;
}
}
int kernelSize = 5;
float* kernel = new float[outChannel * kernelSize * kernelSize];
for ( int i = 0;i < outChannel * kernelSize * kernelSize; ++i) {
kernel[i] = i % kernelSize - 1;
}
float* imgGpu;
float* kernelGpu;
float* resultGpu;
hipMalloc((void**)&imgGpu, inChannel * width * height * sizeof(float));
hipMalloc((void**)&kernelGpu, outChannel * kernelSize * kernelSize * sizeof(float));
hipMalloc((void**)&resultGpu, outChannel * width * height * sizeof(float));
hipMemcpy(imgGpu, img, inChannel * width * height * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(kernelGpu, kernel, outChannel * kernelSize * kernelSize * sizeof(float), hipMemcpyHostToDevice);
int threadNum = getThreadNum();
int blockNum = (width * height - 0.5) / threadNum + 1;
conv<<<blockNum, threadNum>>>(imgGpu, kernelGpu, resultGpu, width, height,
outChannel, kernelSize);
float* result = new float[outChannel * width * height];
hipMemcpy(result, resultGpu, outChannel * width * height * sizeof(float), hipMemcpyDeviceToHost);
for ( int i = 0;i < 10;++i ) {
for ( int j = 0;j < 10;++j ) {
printf("%2.0f ", img[i * width + j]);
}
printf("\n");
}
printf("\n");
for ( int k = 0;k < 2;++k ) {
for ( int i = 0;i < kernelSize;++i ) {
for ( int j = 0;j < kernelSize;++j ) {
printf("%2.0f ", kernel[k * kernelSize * kernelSize + i * kernelSize + j]);
}
printf("\n");
}
printf("\n\n");
}
for ( int k = 0;k < 2;++k ){
for ( int i = 0;i < 10;++i ) {
for ( int j = 0;j < 10;++j ) {
printf("%2.0f ", result[k * height * width + i * width + j]);
}
printf("\n");
}
printf("\n\n");
}
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <cstdio>
int getThreadNum()
{
hipDeviceProp_t prop;
int count;
hipGetDeviceCount(&count);
printf("gpu num %d\n", count);
hipGetDeviceProperties(&prop, 0);
printf("max thread num : %d\n", prop.maxThreadsPerBlock);
printf("grid dimensions : %d %d %d\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
return prop.maxThreadsPerBlock;
}
__global__ void conv (float* img, float* kernel, float* result, int width, int
height, int channel, int kernelSize)
{
int ti = threadIdx.x;
int bi = blockIdx.x;
// int id = (bi * threadNum + ti);
int id = (bi * blockDim.x + ti);
if (id >= width * height * channel) return;
int row = id / width;
int col = id % width;
for ( int k = 0;k < channel;++k ) {
for ( int i = 0;i < kernelSize;++i ) {
for ( int j = 0;j < kernelSize;++j ) {
float imgValue = 0.0;
int curRow = row - kernelSize >> 1 + i;
int curCol = col - kernelSize >> 1 + j;
if ( curRow < 0 || curCol < 0 || curRow >= height || curCol >= width) {
continue;
} else {
imgValue = img[curRow * width + curCol];
}
result[id] += kernel[k * kernelSize * kernelSize + i * kernelSize + j] * imgValue;
}
}
}
// for ( int i = 0;i < kernelSize; ++i ) {
// for ( int j = 0;j < kernelSize; ++j ) {
// float imgValue = 0.0;
// int curRow = row - kernelSize / 2 + i;
// int curCol = col - kernelSize / 2 + j;
// if (curRow < 0 || curCol < 0 || curRow >= height || curCol >= width) {
// continue;
// } else {
// imgValue = img[curRow * width + curCol];
// }
// result[id] += kernel[i * kernelSize + j] * imgValue;
// }
// }
}
int main(void)
{
int width = 1920;
int height = 1080;
int inChannel = 1;
int outChannel = 8;
float* img = new float[width * height];
for ( int row = 0;row < height;++row ) {
for (int col = 0;col < width;++col ) {
img[col + row * width] = (col + row) % 256;
}
}
int kernelSize = 5;
float* kernel = new float[outChannel * kernelSize * kernelSize];
for ( int i = 0;i < outChannel * kernelSize * kernelSize; ++i) {
kernel[i] = i % kernelSize - 1;
}
float* imgGpu;
float* kernelGpu;
float* resultGpu;
hipMalloc((void**)&imgGpu, inChannel * width * height * sizeof(float));
hipMalloc((void**)&kernelGpu, outChannel * kernelSize * kernelSize * sizeof(float));
hipMalloc((void**)&resultGpu, outChannel * width * height * sizeof(float));
hipMemcpy(imgGpu, img, inChannel * width * height * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(kernelGpu, kernel, outChannel * kernelSize * kernelSize * sizeof(float), hipMemcpyHostToDevice);
int threadNum = getThreadNum();
int blockNum = (width * height - 0.5) / threadNum + 1;
conv<<<blockNum, threadNum>>>(imgGpu, kernelGpu, resultGpu, width, height,
outChannel, kernelSize);
float* result = new float[outChannel * width * height];
hipMemcpy(result, resultGpu, outChannel * width * height * sizeof(float), hipMemcpyDeviceToHost);
for ( int i = 0;i < 10;++i ) {
for ( int j = 0;j < 10;++j ) {
printf("%2.0f ", img[i * width + j]);
}
printf("\n");
}
printf("\n");
for ( int k = 0;k < 2;++k ) {
for ( int i = 0;i < kernelSize;++i ) {
for ( int j = 0;j < kernelSize;++j ) {
printf("%2.0f ", kernel[k * kernelSize * kernelSize + i * kernelSize + j]);
}
printf("\n");
}
printf("\n\n");
}
for ( int k = 0;k < 2;++k ){
for ( int i = 0;i < 10;++i ) {
for ( int j = 0;j < 10;++j ) {
printf("%2.0f ", result[k * height * width + i * width + j]);
}
printf("\n");
}
printf("\n\n");
}
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z4convPfS_S_iiii
.globl _Z4convPfS_S_iiii
.p2align 8
.type _Z4convPfS_S_iiii,@function
_Z4convPfS_S_iiii:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x34
s_load_b64 s[8:9], s[0:1], 0x18
s_load_b32 s3, s[0:1], 0x20
s_mov_b32 s10, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mul_i32 s2, s9, s8
s_cmp_gt_i32 s3, 0
s_mul_i32 s2, s2, s3
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_cmp_gt_i32_e32 vcc_lo, s2, v1
s_cselect_b32 s2, -1, 0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, vcc_lo, s2
s_and_saveexec_b32 s4, s2
s_cbranch_execz .LBB0_10
s_ashr_i32 s2, s8, 31
v_ashrrev_i32_e32 v3, 31, v1
s_add_i32 s4, s8, s2
s_clause 0x1
s_load_b32 s11, s[0:1], 0x24
s_load_b64 s[12:13], s[0:1], 0x10
s_xor_b32 s4, s4, s2
s_mov_b32 s14, 0
v_cvt_f32_u32_e32 v0, s4
s_sub_i32 s5, 0, s4
v_add_nc_u32_e32 v4, v1, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_rcp_iflag_f32_e32 v0, v0
v_xor_b32_e32 v4, v4, v3
v_xor_b32_e32 v3, s2, v3
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v0, 0x4f7ffffe, v0
s_waitcnt lgkmcnt(0)
s_cmp_gt_i32 s11, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_u32_f32_e32 v0, v0
v_mul_lo_u32 v2, s5, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v2, v0, v2
v_add_nc_u32_e32 v0, v0, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v0, v4, v0
v_mul_lo_u32 v2, v0, s4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_sub_nc_u32_e32 v2, v4, v2
v_add_nc_u32_e32 v4, 1, v0
v_subrev_nc_u32_e32 v5, s4, v2
v_cmp_le_u32_e32 vcc_lo, s4, v2
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_cndmask_b32_e32 v0, v0, v4, vcc_lo
v_cndmask_b32_e32 v2, v2, v5, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v4, 1, v0
v_cmp_le_u32_e32 vcc_lo, s4, v2
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v0, v0, v4, vcc_lo
v_xor_b32_e32 v0, v0, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
v_sub_nc_u32_e32 v4, v0, v3
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v0, v4, s8
v_sub_nc_u32_e32 v5, v1, v0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v0, vcc_lo, s12, v2
v_add_co_ci_u32_e32 v1, vcc_lo, s13, v3, vcc_lo
v_subrev_nc_u32_e32 v2, s11, v4
s_delay_alu instid0(VALU_DEP_4)
v_subrev_nc_u32_e32 v3, s11, v5
v_mov_b32_e32 v4, 0
s_cselect_b32 s12, -1, 0
s_mul_i32 s13, s11, s11
s_branch .LBB0_3
.LBB0_2:
s_add_i32 s14, s14, 1
s_add_i32 s10, s10, s13
s_cmp_eq_u32 s14, s3
s_cbranch_scc1 .LBB0_10
.LBB0_3:
s_and_not1_b32 vcc_lo, exec_lo, s12
s_cbranch_vccnz .LBB0_2
s_mov_b32 s15, 0
s_mov_b32 s16, s10
s_branch .LBB0_6
.LBB0_5:
s_set_inst_prefetch_distance 0x2
s_add_i32 s16, s16, s11
s_cmp_eq_u32 s15, s11
s_cbranch_scc1 .LBB0_2
.LBB0_6:
s_add_i32 s15, s15, 1
s_mov_b32 s18, 0
v_ashrrev_i32_e32 v6, s15, v2
s_delay_alu instid0(VALU_DEP_1)
v_mul_lo_u32 v5, v6, s8
v_cmp_lt_i32_e32 vcc_lo, -1, v6
v_cmp_gt_i32_e64 s0, s9, v6
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_8
.p2align 6
.LBB0_7:
s_or_b32 exec_lo, exec_lo, s2
s_cmp_eq_u32 s11, s17
s_mov_b32 s18, s17
s_cbranch_scc1 .LBB0_5
.LBB0_8:
s_add_i32 s17, s18, 1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v6, s17, v3
v_cmp_lt_i32_e64 s1, -1, v6
v_cmp_gt_i32_e64 s2, s8, v6
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s1, vcc_lo, s1
s_and_b32 s1, s0, s1
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
s_and_b32 s1, s1, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s2, s1
s_cbranch_execz .LBB0_7
v_add_nc_u32_e32 v6, v6, v5
s_add_i32 s18, s16, s18
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_ashr_i32 s19, s18, 31
s_lshl_b64 s[18:19], s[18:19], 2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v7, 31, v6
s_waitcnt lgkmcnt(0)
s_add_u32 s18, s6, s18
s_addc_u32 s19, s7, s19
v_lshlrev_b64 v[6:7], 2, v[6:7]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v6, s1, s4, v6
v_add_co_ci_u32_e64 v7, s1, s5, v7, s1
global_load_b32 v8, v4, s[18:19]
global_load_b32 v6, v[6:7], off
global_load_b32 v7, v[0:1], off
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v7, v6, v8
global_store_b32 v[0:1], v7, off
s_branch .LBB0_7
.LBB0_10:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z4convPfS_S_iiii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 20
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z4convPfS_S_iiii, .Lfunc_end0-_Z4convPfS_S_iiii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 36
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z4convPfS_S_iiii
.private_segment_fixed_size: 0
.sgpr_count: 22
.sgpr_spill_count: 0
.symbol: _Z4convPfS_S_iiii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <cstdio>
int getThreadNum()
{
hipDeviceProp_t prop;
int count;
hipGetDeviceCount(&count);
printf("gpu num %d\n", count);
hipGetDeviceProperties(&prop, 0);
printf("max thread num : %d\n", prop.maxThreadsPerBlock);
printf("grid dimensions : %d %d %d\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
return prop.maxThreadsPerBlock;
}
__global__ void conv (float* img, float* kernel, float* result, int width, int
height, int channel, int kernelSize)
{
int ti = threadIdx.x;
int bi = blockIdx.x;
// int id = (bi * threadNum + ti);
int id = (bi * blockDim.x + ti);
if (id >= width * height * channel) return;
int row = id / width;
int col = id % width;
for ( int k = 0;k < channel;++k ) {
for ( int i = 0;i < kernelSize;++i ) {
for ( int j = 0;j < kernelSize;++j ) {
float imgValue = 0.0;
int curRow = row - kernelSize >> 1 + i;
int curCol = col - kernelSize >> 1 + j;
if ( curRow < 0 || curCol < 0 || curRow >= height || curCol >= width) {
continue;
} else {
imgValue = img[curRow * width + curCol];
}
result[id] += kernel[k * kernelSize * kernelSize + i * kernelSize + j] * imgValue;
}
}
}
// for ( int i = 0;i < kernelSize; ++i ) {
// for ( int j = 0;j < kernelSize; ++j ) {
// float imgValue = 0.0;
// int curRow = row - kernelSize / 2 + i;
// int curCol = col - kernelSize / 2 + j;
// if (curRow < 0 || curCol < 0 || curRow >= height || curCol >= width) {
// continue;
// } else {
// imgValue = img[curRow * width + curCol];
// }
// result[id] += kernel[i * kernelSize + j] * imgValue;
// }
// }
}
int main(void)
{
int width = 1920;
int height = 1080;
int inChannel = 1;
int outChannel = 8;
float* img = new float[width * height];
for ( int row = 0;row < height;++row ) {
for (int col = 0;col < width;++col ) {
img[col + row * width] = (col + row) % 256;
}
}
int kernelSize = 5;
float* kernel = new float[outChannel * kernelSize * kernelSize];
for ( int i = 0;i < outChannel * kernelSize * kernelSize; ++i) {
kernel[i] = i % kernelSize - 1;
}
float* imgGpu;
float* kernelGpu;
float* resultGpu;
hipMalloc((void**)&imgGpu, inChannel * width * height * sizeof(float));
hipMalloc((void**)&kernelGpu, outChannel * kernelSize * kernelSize * sizeof(float));
hipMalloc((void**)&resultGpu, outChannel * width * height * sizeof(float));
hipMemcpy(imgGpu, img, inChannel * width * height * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(kernelGpu, kernel, outChannel * kernelSize * kernelSize * sizeof(float), hipMemcpyHostToDevice);
int threadNum = getThreadNum();
int blockNum = (width * height - 0.5) / threadNum + 1;
conv<<<blockNum, threadNum>>>(imgGpu, kernelGpu, resultGpu, width, height,
outChannel, kernelSize);
float* result = new float[outChannel * width * height];
hipMemcpy(result, resultGpu, outChannel * width * height * sizeof(float), hipMemcpyDeviceToHost);
for ( int i = 0;i < 10;++i ) {
for ( int j = 0;j < 10;++j ) {
printf("%2.0f ", img[i * width + j]);
}
printf("\n");
}
printf("\n");
for ( int k = 0;k < 2;++k ) {
for ( int i = 0;i < kernelSize;++i ) {
for ( int j = 0;j < kernelSize;++j ) {
printf("%2.0f ", kernel[k * kernelSize * kernelSize + i * kernelSize + j]);
}
printf("\n");
}
printf("\n\n");
}
for ( int k = 0;k < 2;++k ){
for ( int i = 0;i < 10;++i ) {
for ( int j = 0;j < 10;++j ) {
printf("%2.0f ", result[k * height * width + i * width + j]);
}
printf("\n");
}
printf("\n\n");
}
return 0;
} | .text
.file "main_8155.hip"
.globl _Z12getThreadNumv # -- Begin function _Z12getThreadNumv
.p2align 4, 0x90
.type _Z12getThreadNumv,@function
_Z12getThreadNumv: # @_Z12getThreadNumv
.cfi_startproc
# %bb.0:
subq $1480, %rsp # imm = 0x5C8
.cfi_def_cfa_offset 1488
leaq 4(%rsp), %rdi
callq hipGetDeviceCount
movl 4(%rsp), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
leaq 8(%rsp), %rdi
xorl %esi, %esi
callq hipGetDevicePropertiesR0600
movl 328(%rsp), %esi
movl $.L.str.1, %edi
xorl %eax, %eax
callq printf
movl 344(%rsp), %esi
movl 348(%rsp), %edx
movl 352(%rsp), %ecx
movl $.L.str.2, %edi
xorl %eax, %eax
callq printf
movl 328(%rsp), %eax
addq $1480, %rsp # imm = 0x5C8
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z12getThreadNumv, .Lfunc_end0-_Z12getThreadNumv
.cfi_endproc
# -- End function
.globl _Z19__device_stub__convPfS_S_iiii # -- Begin function _Z19__device_stub__convPfS_S_iiii
.p2align 4, 0x90
.type _Z19__device_stub__convPfS_S_iiii,@function
_Z19__device_stub__convPfS_S_iiii: # @_Z19__device_stub__convPfS_S_iiii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 20(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 160(%rsp), %rax
movq %rax, 144(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z4convPfS_S_iiii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end1:
.size _Z19__device_stub__convPfS_S_iiii, .Lfunc_end1-_Z19__device_stub__convPfS_S_iiii
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI2_0:
.quad 0x413fa3ff80000000 # double 2073599.5
.LCPI2_1:
.quad 0x3ff0000000000000 # double 1
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $1592, %rsp # imm = 0x638
.cfi_def_cfa_offset 1648
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $8294400, %edi # imm = 0x7E9000
callq _Znam
movq %rax, %r14
xorl %eax, %eax
movq %r14, %rcx
.p2align 4, 0x90
.LBB2_1: # %.preheader119
# =>This Loop Header: Depth=1
# Child Loop BB2_2 Depth 2
xorl %edx, %edx
.p2align 4, 0x90
.LBB2_2: # Parent Loop BB2_1 Depth=1
# => This Inner Loop Header: Depth=2
leal (%rax,%rdx), %esi
movzbl %sil, %esi
xorps %xmm0, %xmm0
cvtsi2ss %esi, %xmm0
movss %xmm0, (%rcx,%rdx,4)
incq %rdx
cmpq $1920, %rdx # imm = 0x780
jne .LBB2_2
# %bb.3: # in Loop: Header=BB2_1 Depth=1
incq %rax
addq $7680, %rcx # imm = 0x1E00
cmpq $1080, %rax # imm = 0x438
jne .LBB2_1
# %bb.4:
movl $800, %edi # imm = 0x320
callq _Znam
movq %rax, %rbx
movl $-1, %eax
xorl %ecx, %ecx
movl $3435973837, %edx # imm = 0xCCCCCCCD
.p2align 4, 0x90
.LBB2_5: # =>This Inner Loop Header: Depth=1
movl %ecx, %esi
imulq %rdx, %rsi
shrq $34, %rsi
leal (%rsi,%rsi,4), %esi
movl %eax, %edi
subl %esi, %edi
xorps %xmm0, %xmm0
cvtsi2ss %edi, %xmm0
movss %xmm0, (%rbx,%rcx,4)
incq %rcx
incl %eax
cmpq $200, %rcx
jne .LBB2_5
# %bb.6:
leaq 48(%rsp), %rdi
movl $8294400, %esi # imm = 0x7E9000
callq hipMalloc
leaq 40(%rsp), %rdi
movl $800, %esi # imm = 0x320
callq hipMalloc
leaq 32(%rsp), %rdi
movl $66355200, %esi # imm = 0x3F48000
callq hipMalloc
movq 48(%rsp), %rdi
movl $8294400, %edx # imm = 0x7E9000
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movq 40(%rsp), %rdi
movl $800, %edx # imm = 0x320
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
leaq 16(%rsp), %rdi
callq hipGetDeviceCount
movl 16(%rsp), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
leaq 112(%rsp), %rdi
xorl %esi, %esi
callq hipGetDevicePropertiesR0600
movl 432(%rsp), %esi
movl $.L.str.1, %edi
xorl %eax, %eax
callq printf
movl 448(%rsp), %esi
movl 452(%rsp), %edx
movl 456(%rsp), %ecx
movl $.L.str.2, %edi
xorl %eax, %eax
callq printf
movl 432(%rsp), %edx
xorps %xmm0, %xmm0
cvtsi2sd %edx, %xmm0
movsd .LCPI2_0(%rip), %xmm1 # xmm1 = mem[0],zero
divsd %xmm0, %xmm1
addsd .LCPI2_1(%rip), %xmm1
cvttsd2si %xmm1, %edi
movabsq $4294967296, %rax # imm = 0x100000000
orq %rax, %rdi
orq %rax, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_8
# %bb.7:
movq 48(%rsp), %rax
movq 40(%rsp), %rcx
movq 32(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl $1920, 12(%rsp) # imm = 0x780
movl $1080, 8(%rsp) # imm = 0x438
movl $8, 4(%rsp)
movl $5, (%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
leaq 4(%rsp), %rax
movq %rax, 152(%rsp)
movq %rsp, %rax
movq %rax, 160(%rsp)
leaq 16(%rsp), %rdi
leaq 72(%rsp), %rsi
leaq 64(%rsp), %rdx
leaq 56(%rsp), %rcx
callq __hipPopCallConfiguration
movq 16(%rsp), %rsi
movl 24(%rsp), %edx
movq 72(%rsp), %rcx
movl 80(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z4convPfS_S_iiii, %edi
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_8:
movl $66355200, %edi # imm = 0x3F48000
callq _Znam
movq %rax, %r15
movq 32(%rsp), %rsi
movl $66355200, %edx # imm = 0x3F48000
movq %rax, %rdi
movl $2, %ecx
callq hipMemcpy
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB2_9: # %.preheader118
# =>This Loop Header: Depth=1
# Child Loop BB2_10 Depth 2
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB2_10: # Parent Loop BB2_9 Depth=1
# => This Inner Loop Header: Depth=2
movss (%r14,%r13,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.3, %edi
movb $1, %al
callq printf
incq %r13
cmpq $10, %r13
jne .LBB2_10
# %bb.11: # in Loop: Header=BB2_9 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r12
addq $7680, %r14 # imm = 0x1E00
cmpq $10, %r12
jne .LBB2_9
# %bb.12:
movl $10, %edi
callq putchar@PLT
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB2_13: # %.preheader117
# =>This Loop Header: Depth=1
# Child Loop BB2_14 Depth 2
# Child Loop BB2_15 Depth 3
movq %rbx, %r12
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB2_14: # %.preheader116
# Parent Loop BB2_13 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB2_15 Depth 3
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB2_15: # Parent Loop BB2_13 Depth=1
# Parent Loop BB2_14 Depth=2
# => This Inner Loop Header: Depth=3
movss (%r12,%rbp,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.3, %edi
movb $1, %al
callq printf
incq %rbp
cmpq $5, %rbp
jne .LBB2_15
# %bb.16: # in Loop: Header=BB2_14 Depth=2
movl $10, %edi
callq putchar@PLT
incq %r13
addq $20, %r12
cmpq $5, %r13
jne .LBB2_14
# %bb.17: # in Loop: Header=BB2_13 Depth=1
movl $.Lstr.1, %edi
callq puts@PLT
leaq 1(%r14), %rax
addq $100, %rbx
testq %r14, %r14
movq %rax, %r14
je .LBB2_13
# %bb.18: # %.preheader114.preheader
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB2_19: # %.preheader114
# =>This Loop Header: Depth=1
# Child Loop BB2_20 Depth 2
# Child Loop BB2_21 Depth 3
movq %r15, %r14
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB2_20: # %.preheader
# Parent Loop BB2_19 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB2_21 Depth 3
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB2_21: # Parent Loop BB2_19 Depth=1
# Parent Loop BB2_20 Depth=2
# => This Inner Loop Header: Depth=3
movss (%r14,%r13,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.3, %edi
movb $1, %al
callq printf
incq %r13
cmpq $10, %r13
jne .LBB2_21
# %bb.22: # in Loop: Header=BB2_20 Depth=2
movl $10, %edi
callq putchar@PLT
incq %r12
addq $7680, %r14 # imm = 0x1E00
cmpq $10, %r12
jne .LBB2_20
# %bb.23: # in Loop: Header=BB2_19 Depth=1
movl $.Lstr.1, %edi
callq puts@PLT
leaq 1(%rbx), %rax
addq $8294400, %r15 # imm = 0x7E9000
testq %rbx, %rbx
movq %rax, %rbx
je .LBB2_19
# %bb.24:
xorl %eax, %eax
addq $1592, %rsp # imm = 0x638
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4convPfS_S_iiii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "gpu num %d\n"
.size .L.str, 12
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "max thread num : %d\n"
.size .L.str.1, 21
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "grid dimensions : %d %d %d\n"
.size .L.str.2, 28
.type _Z4convPfS_S_iiii,@object # @_Z4convPfS_S_iiii
.section .rodata,"a",@progbits
.globl _Z4convPfS_S_iiii
.p2align 3, 0x0
_Z4convPfS_S_iiii:
.quad _Z19__device_stub__convPfS_S_iiii
.size _Z4convPfS_S_iiii, 8
.type .L.str.3,@object # @.str.3
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.3:
.asciz "%2.0f "
.size .L.str.3, 7
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z4convPfS_S_iiii"
.size .L__unnamed_1, 18
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr.1,@object # @str.1
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr.1:
.asciz "\n"
.size .Lstr.1, 2
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z19__device_stub__convPfS_S_iiii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z4convPfS_S_iiii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z4convPfS_S_iiii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R5, SR_CTAID.X ; /* 0x0000000000057919 */
/* 0x000e220000002500 */
/*0020*/ ULDC.64 UR4, c[0x0][0x178] ; /* 0x00005e0000047ab9 */
/* 0x000fe40000000a00 */
/*0030*/ UIMAD UR4, UR5, UR4, URZ ; /* 0x00000004050472a4 */
/* 0x000fe2000f8e023f */
/*0040*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e240000002100 */
/*0050*/ ULDC UR5, c[0x0][0x180] ; /* 0x0000600000057ab9 */
/* 0x000fe40000000800 */
/*0060*/ UIMAD UR4, UR4, UR5, URZ ; /* 0x00000005040472a4 */
/* 0x000fe2000f8e023f */
/*0070*/ IMAD R5, R5, c[0x0][0x0], R0 ; /* 0x0000000005057a24 */
/* 0x001fca00078e0200 */
/*0080*/ ISETP.GE.AND P0, PT, R5, UR4, PT ; /* 0x0000000405007c0c */
/* 0x000fda000bf06270 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ IMAD.MOV.U32 R6, RZ, RZ, 0x1 ; /* 0x00000001ff067424 */
/* 0x000fca00078e00ff */
/*00b0*/ ISETP.LE.AND P0, PT, R6, c[0x0][0x180], PT ; /* 0x0000600006007a0c */
/* 0x000fda0003f03270 */
/*00c0*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*00d0*/ IABS R7, c[0x0][0x178] ; /* 0x00005e0000077a13 */
/* 0x000fe40000000000 */
/*00e0*/ ISETP.LE.AND P0, PT, R6, c[0x0][0x184], PT ; /* 0x0000610006007a0c */
/* 0x000fe40003f03270 */
/*00f0*/ I2F.RP R0, R7 ; /* 0x0000000700007306 */
/* 0x000e300000209400 */
/*0100*/ MUFU.RCP R0, R0 ; /* 0x0000000000007308 */
/* 0x001e240000001000 */
/*0110*/ IADD3 R2, R0, 0xffffffe, RZ ; /* 0x0ffffffe00027810 */
/* 0x001fcc0007ffe0ff */
/*0120*/ F2I.FTZ.U32.TRUNC.NTZ R3, R2 ; /* 0x0000000200037305 */
/* 0x000064000021f000 */
/*0130*/ IMAD.MOV.U32 R2, RZ, RZ, RZ ; /* 0x000000ffff027224 */
/* 0x001fe400078e00ff */
/*0140*/ IMAD.MOV R4, RZ, RZ, -R3 ; /* 0x000000ffff047224 */
/* 0x002fc800078e0a03 */
/*0150*/ IMAD R9, R4, R7, RZ ; /* 0x0000000704097224 */
/* 0x000fe200078e02ff */
/*0160*/ IABS R4, R5 ; /* 0x0000000500047213 */
/* 0x000fc60000000000 */
/*0170*/ IMAD.HI.U32 R3, R3, R9, R2 ; /* 0x0000000903037227 */
/* 0x000fcc00078e0002 */
/*0180*/ IMAD.HI.U32 R3, R3, R4, RZ ; /* 0x0000000403037227 */
/* 0x000fc800078e00ff */
/*0190*/ IMAD.MOV R0, RZ, RZ, -R3 ; /* 0x000000ffff007224 */
/* 0x000fc800078e0a03 */
/*01a0*/ IMAD R0, R7, R0, R4 ; /* 0x0000000007007224 */
/* 0x000fca00078e0204 */
/*01b0*/ ISETP.GT.U32.AND P3, PT, R7, R0, PT ; /* 0x000000000700720c */
/* 0x000fda0003f64070 */
/*01c0*/ @!P3 IMAD.IADD R0, R0, 0x1, -R7 ; /* 0x000000010000b824 */
/* 0x000fe200078e0a07 */
/*01d0*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fec0003800000 */
/*01e0*/ ISETP.GE.U32.AND P2, PT, R0, R7, PT ; /* 0x000000070000720c */
/* 0x000fe20003f46070 */
/*01f0*/ IMAD.MOV.U32 R8, RZ, RZ, 0x4 ; /* 0x00000004ff087424 */
/* 0x000fe200078e00ff */
/*0200*/ LOP3.LUT R2, R5.reuse, c[0x0][0x178], RZ, 0x3c, !PT ; /* 0x00005e0005027a12 */
/* 0x040fe200078e3cff */
/*0210*/ IMAD.MOV.U32 R4, RZ, RZ, RZ ; /* 0x000000ffff047224 */
/* 0x000fe200078e00ff */
/*0220*/ ISETP.GE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fe20003f06270 */
/*0230*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe20000000a00 */
/*0240*/ ISETP.GE.AND P1, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x000fe40003f26270 */
/*0250*/ @!P3 IADD3 R3, R3, 0x1, RZ ; /* 0x000000010303b810 */
/* 0x000fe40007ffe0ff */
/*0260*/ ISETP.GT.U32.AND P3, PT, R7, R0, PT ; /* 0x000000000700720c */
/* 0x000fe20003f64070 */
/*0270*/ IMAD.IADD R7, R0, 0x1, -R7 ; /* 0x0000000100077824 */
/* 0x000fe200078e0a07 */
/*0280*/ LOP3.LUT R2, RZ, c[0x0][0x178], RZ, 0x33, !PT ; /* 0x00005e00ff027a12 */
/* 0x000fc400078e33ff */
/*0290*/ @P2 IADD3 R3, R3, 0x1, RZ ; /* 0x0000000103032810 */
/* 0x000fe40007ffe0ff */
/*02a0*/ SEL R7, R7, R0, !P3 ; /* 0x0000000007077207 */
/* 0x000fe20005800000 */
/*02b0*/ IMAD.MOV.U32 R0, RZ, RZ, c[0x0][0x184] ; /* 0x00006100ff007624 */
/* 0x000fe200078e00ff */
/*02c0*/ ISETP.NE.AND P2, PT, RZ, c[0x0][0x178], PT ; /* 0x00005e00ff007a0c */
/* 0x000fe20003f45270 */
/*02d0*/ @!P1 IMAD.MOV R3, RZ, RZ, -R3 ; /* 0x000000ffff039224 */
/* 0x000fe400078e0a03 */
/*02e0*/ @!P0 IMAD.MOV R7, RZ, RZ, -R7 ; /* 0x000000ffff078224 */
/* 0x000fe200078e0a07 */
/*02f0*/ LOP3.LUT R0, R0, 0x3, RZ, 0xc0, !PT ; /* 0x0000000300007812 */
/* 0x000fe400078ec0ff */
/*0300*/ SEL R6, R2, R3, !P2 ; /* 0x0000000302067207 */
/* 0x000fc40005000000 */
/*0310*/ SEL R7, R2, R7, !P2 ; /* 0x0000000702077207 */
/* 0x000fe20005000000 */
/*0320*/ IMAD.WIDE R2, R5, R8, c[0x0][0x170] ; /* 0x00005c0005027625 */
/* 0x000fe200078e0208 */
/*0330*/ IADD3 R16, R0, -c[0x0][0x184], RZ ; /* 0x8000610000107a10 */
/* 0x000fe40007ffe0ff */
/*0340*/ IADD3 R5, R7, -c[0x0][0x184], RZ ; /* 0x8000610007057a10 */
/* 0x000fe40007ffe0ff */
/*0350*/ IADD3 R6, R6, -c[0x0][0x184], RZ ; /* 0x8000610006067a10 */
/* 0x000fe40007ffe0ff */
/*0360*/ IMAD.MOV.U32 R13, RZ, RZ, RZ ; /* 0x000000ffff0d7224 */
/* 0x001fca00078e00ff */
/*0370*/ IMAD.MOV.U32 R7, RZ, RZ, c[0x0][0x184] ; /* 0x00006100ff077624 */
/* 0x001fe200078e00ff */
/*0380*/ IADD3 R11, R13, 0x1, RZ ; /* 0x000000010d0b7810 */
/* 0x000fe20007ffe0ff */
/*0390*/ IMAD R10, R4, c[0x0][0x184], R13 ; /* 0x00006100040a7a24 */
/* 0x000fe200078e020d */
/*03a0*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */
/* 0x000fe20008000000 */
/*03b0*/ ISETP.NE.AND P2, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fc40003f45270 */
/*03c0*/ IADD3 R7, R7, -0x1, RZ ; /* 0xffffffff07077810 */
/* 0x000fe20007ffe0ff */
/*03d0*/ IMAD R10, R10, c[0x0][0x184], RZ ; /* 0x000061000a0a7a24 */
/* 0x000fe200078e02ff */
/*03e0*/ ISETP.GE.AND P3, PT, R11, c[0x0][0x184], PT ; /* 0x000061000b007a0c */
/* 0x000fe40003f66270 */
/*03f0*/ ISETP.GE.U32.AND P0, PT, R7, 0x3, PT ; /* 0x000000030700780c */
/* 0x000fe40003f06070 */
/*0400*/ SHF.R.S32.HI R7, RZ, R11, R6 ; /* 0x0000000bff077219 */
/* 0x000fd60000011406 */
/*0410*/ @!P0 BRA 0x7f0 ; /* 0x000003d000008947 */
/* 0x000fea0003800000 */
/*0420*/ IMAD.MOV.U32 R9, RZ, RZ, 0x4 ; /* 0x00000004ff097424 */
/* 0x000fe200078e00ff */
/*0430*/ ISETP.GE.AND P4, PT, R7, c[0x0][0x17c], PT ; /* 0x00005f0007007a0c */
/* 0x000fe20003f86270 */
/*0440*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */
/* 0x000fe40008000000 */
/*0450*/ IMAD.WIDE R8, R10, R9, c[0x0][0x168] ; /* 0x00005a000a087625 */
/* 0x000fc800078e0209 */
/*0460*/ UIADD3 UR5, UR4, 0x1, URZ ; /* 0x0000000104057890 */
/* 0x000fcc000fffe03f */
/*0470*/ SHF.R.S32.HI R12, RZ, UR5, R5 ; /* 0x00000005ff0c7c19 */
/* 0x000fc80008011405 */
/*0480*/ LOP3.LUT R13, R12, R7, RZ, 0xfc, !PT ; /* 0x000000070c0d7212 */
/* 0x000fc800078efcff */
/*0490*/ ISETP.LT.OR P1, PT, R13, RZ, P4 ; /* 0x000000ff0d00720c */
/* 0x000fc80002721670 */
/*04a0*/ ISETP.GE.OR P1, PT, R12, c[0x0][0x178], P1 ; /* 0x00005e000c007a0c */
/* 0x000fda0000f26670 */
/*04b0*/ @!P1 IMAD.MOV.U32 R13, RZ, RZ, 0x4 ; /* 0x00000004ff0d9424 */
/* 0x000fe200078e00ff */
/*04c0*/ @!P1 LDG.E R14, [R8.64] ; /* 0x00000006080e9981 */
/* 0x000ea2000c1e1900 */
/*04d0*/ @!P1 IMAD R12, R7, c[0x0][0x178], R12 ; /* 0x00005e00070c9a24 */
/* 0x000fc600078e020c */
/*04e0*/ @!P1 LDG.E R15, [R2.64] ; /* 0x00000006020f9981 */
/* 0x000ea2000c1e1900 */
/*04f0*/ @!P1 IMAD.WIDE R12, R12, R13, c[0x0][0x160] ; /* 0x000058000c0c9625 */
/* 0x000fcc00078e020d */
/*0500*/ @!P1 LDG.E R13, [R12.64] ; /* 0x000000060c0d9981 */
/* 0x000ea2000c1e1900 */
/*0510*/ UIADD3 UR5, UR4, 0x2, URZ ; /* 0x0000000204057890 */
/* 0x000fcc000fffe03f */
/*0520*/ SHF.R.S32.HI R18, RZ, UR5, R5 ; /* 0x00000005ff127c19 */
/* 0x000fc80008011405 */
/*0530*/ LOP3.LUT R17, R18, R7, RZ, 0xfc, !PT ; /* 0x0000000712117212 */
/* 0x000fc800078efcff */
/*0540*/ ISETP.LT.OR P0, PT, R17, RZ, P4 ; /* 0x000000ff1100720c */
/* 0x000fc80002701670 */
/*0550*/ ISETP.GE.OR P0, PT, R18, c[0x0][0x178], P0 ; /* 0x00005e0012007a0c */
/* 0x000fda0000706670 */
/*0560*/ @!P0 IMAD.MOV.U32 R19, RZ, RZ, 0x4 ; /* 0x00000004ff138424 */
/* 0x000fe400078e00ff */
/*0570*/ @!P0 IMAD R18, R7, c[0x0][0x178], R18 ; /* 0x00005e0007128a24 */
/* 0x000fe400078e0212 */
/*0580*/ @!P1 FFMA R17, R14, R13, R15 ; /* 0x0000000d0e119223 */
/* 0x004fe4000000000f */
/*0590*/ @!P0 IMAD.WIDE R14, R18, R19, c[0x0][0x160] ; /* 0x00005800120e8625 */
/* 0x000fc600078e0213 */
/*05a0*/ @!P1 STG.E [R2.64], R17 ; /* 0x0000001102009986 */
/* 0x0001e8000c101906 */
/*05b0*/ @!P0 LDG.E R15, [R14.64] ; /* 0x000000060e0f8981 */
/* 0x000e28000c1e1900 */
/*05c0*/ @!P0 LDG.E R12, [R8.64+0x4] ; /* 0x00000406080c8981 */
/* 0x000e28000c1e1900 */
/*05d0*/ @!P0 LDG.E R18, [R2.64] ; /* 0x0000000602128981 */
/* 0x000e22000c1e1900 */
/*05e0*/ UIADD3 UR5, UR4, 0x3, URZ ; /* 0x0000000304057890 */
/* 0x000fcc000fffe03f */
/*05f0*/ SHF.R.S32.HI R20, RZ, UR5, R5 ; /* 0x00000005ff147c19 */
/* 0x000fc80008011405 */
/*0600*/ LOP3.LUT R13, R20, R7, RZ, 0xfc, !PT ; /* 0x00000007140d7212 */
/* 0x000fc800078efcff */
/*0610*/ ISETP.LT.OR P1, PT, R13, RZ, P4 ; /* 0x000000ff0d00720c */
/* 0x000fc80002721670 */
/*0620*/ ISETP.GE.OR P1, PT, R20, c[0x0][0x178], P1 ; /* 0x00005e0014007a0c */
/* 0x000fda0000f26670 */
/*0630*/ @!P1 IMAD.MOV.U32 R22, RZ, RZ, 0x4 ; /* 0x00000004ff169424 */
/* 0x000fe400078e00ff */
/*0640*/ @!P1 IMAD R13, R7, c[0x0][0x178], R20 ; /* 0x00005e00070d9a24 */
/* 0x000fe400078e0214 */
/*0650*/ @!P0 FFMA R17, R12, R15, R18 ; /* 0x0000000f0c118223 */
/* 0x001fe40000000012 */
/*0660*/ @!P1 IMAD.WIDE R12, R13, R22, c[0x0][0x160] ; /* 0x000058000d0c9625 */
/* 0x000fc600078e0216 */
/*0670*/ @!P0 STG.E [R2.64], R17 ; /* 0x0000001102008986 */
/* 0x0001e8000c101906 */
/*0680*/ @!P1 LDG.E R13, [R12.64] ; /* 0x000000060c0d9981 */
/* 0x000ea8000c1e1900 */
/*0690*/ @!P1 LDG.E R14, [R8.64+0x8] ; /* 0x00000806080e9981 */
/* 0x000ea8000c1e1900 */
/*06a0*/ @!P1 LDG.E R19, [R2.64] ; /* 0x0000000602139981 */
/* 0x000ea2000c1e1900 */
/*06b0*/ UIADD3 UR4, UR4, 0x4, URZ ; /* 0x0000000404047890 */
/* 0x000fcc000fffe03f */
/*06c0*/ SHF.R.S32.HI R18, RZ, UR4, R5 ; /* 0x00000004ff127c19 */
/* 0x000fc80008011405 */
/*06d0*/ LOP3.LUT R15, R18, R7, RZ, 0xfc, !PT ; /* 0x00000007120f7212 */
/* 0x000fc800078efcff */
/*06e0*/ ISETP.LT.OR P0, PT, R15, RZ, P4 ; /* 0x000000ff0f00720c */
/* 0x000fc80002701670 */
/*06f0*/ ISETP.GE.OR P0, PT, R18, c[0x0][0x178], P0 ; /* 0x00005e0012007a0c */
/* 0x000fda0000706670 */
/*0700*/ @!P0 IMAD.MOV.U32 R21, RZ, RZ, 0x4 ; /* 0x00000004ff158424 */
/* 0x000fe400078e00ff */
/*0710*/ @!P0 IMAD R18, R7, c[0x0][0x178], R18 ; /* 0x00005e0007128a24 */
/* 0x000fe400078e0212 */
/*0720*/ @!P1 FFMA R15, R14, R13, R19 ; /* 0x0000000d0e0f9223 */
/* 0x004fe40000000013 */
/*0730*/ @!P0 IMAD.WIDE R12, R18, R21, c[0x0][0x160] ; /* 0x00005800120c8625 */
/* 0x000fc600078e0215 */
/*0740*/ @!P1 STG.E [R2.64], R15 ; /* 0x0000000f02009986 */
/* 0x0003e8000c101906 */
/*0750*/ @!P0 LDG.E R13, [R12.64] ; /* 0x000000060c0d8981 */
/* 0x000ea8000c1e1900 */
/*0760*/ @!P0 LDG.E R14, [R8.64+0xc] ; /* 0x00000c06080e8981 */
/* 0x0006a8000c1e1900 */
/*0770*/ @!P0 LDG.E R17, [R2.64] ; /* 0x0000000602118981 */
/* 0x001ea2000c1e1900 */
/*0780*/ IADD3 R8, P1, R8, 0x10, RZ ; /* 0x0000001008087810 */
/* 0x008fca0007f3e0ff */
/*0790*/ IMAD.X R9, RZ, RZ, R9, P1 ; /* 0x000000ffff097224 */
/* 0x000fe400008e0609 */
/*07a0*/ @!P0 FFMA R17, R14, R13, R17 ; /* 0x0000000d0e118223 */
/* 0x004fe20000000011 */
/*07b0*/ IADD3 R14, R16, UR4, RZ ; /* 0x00000004100e7c10 */
/* 0x000fc8000fffe0ff */
/*07c0*/ @!P0 STG.E [R2.64], R17 ; /* 0x0000001102008986 */
/* 0x0003e2000c101906 */
/*07d0*/ ISETP.NE.AND P0, PT, R14, RZ, PT ; /* 0x000000ff0e00720c */
/* 0x000fda0003f05270 */
/*07e0*/ @P0 BRA 0x460 ; /* 0xfffffc7000000947 */
/* 0x002fea000383ffff */
/*07f0*/ IMAD.MOV.U32 R13, RZ, RZ, R11 ; /* 0x000000ffff0d7224 */
/* 0x000fe200078e000b */
/*0800*/ @!P2 BRA 0xb60 ; /* 0x000003500000a947 */
/* 0x000fea0003800000 */
/*0810*/ UIADD3 UR5, UR4, 0x1, URZ ; /* 0x0000000104057890 */
/* 0x000fe2000fffe03f */
/*0820*/ ISETP.GE.AND P0, PT, R7, c[0x0][0x17c], PT ; /* 0x00005f0007007a0c */
/* 0x000fe20003f06270 */
/*0830*/ BSSY B0, 0x950 ; /* 0x0000011000007945 */
/* 0x000fe20003800000 */
/*0840*/ IADD3 R9, R10, UR4, RZ ; /* 0x000000040a097c10 */
/* 0x000fe2000fffe0ff */
/*0850*/ IMAD.MOV.U32 R10, RZ, RZ, 0x4 ; /* 0x00000004ff0a7424 */
/* 0x000fe200078e00ff */
/*0860*/ ISETP.NE.AND P2, PT, R0, 0x1, PT ; /* 0x000000010000780c */
/* 0x000fe40003f45270 */
/*0870*/ SHF.R.S32.HI R12, RZ, UR5, R5 ; /* 0x00000005ff0c7c19 */
/* 0x000fc80008011405 */
/*0880*/ LOP3.LUT R8, R12, R7, RZ, 0xfc, !PT ; /* 0x000000070c087212 */
/* 0x000fc800078efcff */
/*0890*/ ISETP.LT.OR P1, PT, R8, RZ, P0 ; /* 0x000000ff0800720c */
/* 0x000fe20000721670 */
/*08a0*/ IMAD.WIDE R8, R9, R10, c[0x0][0x168] ; /* 0x00005a0009087625 */
/* 0x000fc600078e020a */
/*08b0*/ ISETP.GE.OR P1, PT, R12, c[0x0][0x178], P1 ; /* 0x00005e000c007a0c */
/* 0x000fda0000f26670 */
/*08c0*/ @P1 BRA 0x940 ; /* 0x0000007000001947 */
/* 0x000fea0003800000 */
/*08d0*/ IMAD R15, R7, c[0x0][0x178], R12 ; /* 0x00005e00070f7a24 */
/* 0x000fe200078e020c */
/*08e0*/ LDG.E R11, [R8.64] ; /* 0x00000006080b7981 */
/* 0x000ea6000c1e1900 */
/*08f0*/ IMAD.WIDE R14, R15, R10, c[0x0][0x160] ; /* 0x000058000f0e7625 */
/* 0x000fe200078e020a */
/*0900*/ LDG.E R12, [R2.64] ; /* 0x00000006020c7981 */
/* 0x000eaa000c1e1900 */
/*0910*/ LDG.E R14, [R14.64] ; /* 0x000000060e0e7981 */
/* 0x000ea4000c1e1900 */
/*0920*/ FFMA R11, R11, R14, R12 ; /* 0x0000000e0b0b7223 */
/* 0x004fca000000000c */
/*0930*/ STG.E [R2.64], R11 ; /* 0x0000000b02007986 */
/* 0x0001e4000c101906 */
/*0940*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0950*/ @!P2 BRA 0xb60 ; /* 0x000002000000a947 */
/* 0x000fea0003800000 */
/*0960*/ UIADD3 UR5, UR4, 0x2, URZ ; /* 0x0000000204057890 */
/* 0x000fe2000fffe03f */
/*0970*/ BSSY B0, 0xa60 ; /* 0x000000e000007945 */
/* 0x000fe20003800000 */
/*0980*/ ISETP.NE.AND P2, PT, R0, 0x2, PT ; /* 0x000000020000780c */
/* 0x000fc80003f45270 */
/*0990*/ SHF.R.S32.HI R12, RZ, UR5, R5 ; /* 0x00000005ff0c7c19 */
/* 0x000fc80008011405 */
/*09a0*/ LOP3.LUT R11, R12, R7, RZ, 0xfc, !PT ; /* 0x000000070c0b7212 */
/* 0x001fc800078efcff */
/*09b0*/ ISETP.LT.OR P1, PT, R11, RZ, P0 ; /* 0x000000ff0b00720c */
/* 0x000fc80000721670 */
/*09c0*/ ISETP.GE.OR P1, PT, R12, c[0x0][0x178], P1 ; /* 0x00005e000c007a0c */
/* 0x000fda0000f26670 */
/*09d0*/ @P1 BRA 0xa50 ; /* 0x0000007000001947 */
/* 0x000fea0003800000 */
/*09e0*/ IMAD R15, R7, c[0x0][0x178], R12 ; /* 0x00005e00070f7a24 */
/* 0x000fe200078e020c */
/*09f0*/ LDG.E R11, [R8.64+0x4] ; /* 0x00000406080b7981 */
/* 0x000ea6000c1e1900 */
/*0a00*/ IMAD.WIDE R14, R15, R10, c[0x0][0x160] ; /* 0x000058000f0e7625 */
/* 0x000fe200078e020a */
/*0a10*/ LDG.E R12, [R2.64] ; /* 0x00000006020c7981 */
/* 0x000eaa000c1e1900 */
/*0a20*/ LDG.E R14, [R14.64] ; /* 0x000000060e0e7981 */
/* 0x000ea4000c1e1900 */
/*0a30*/ FFMA R11, R11, R14, R12 ; /* 0x0000000e0b0b7223 */
/* 0x004fca000000000c */
/*0a40*/ STG.E [R2.64], R11 ; /* 0x0000000b02007986 */
/* 0x0001e4000c101906 */
/*0a50*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0a60*/ @!P2 BRA 0xb60 ; /* 0x000000f00000a947 */
/* 0x000fea0003800000 */
/*0a70*/ UIADD3 UR4, UR4, 0x3, URZ ; /* 0x0000000304047890 */
/* 0x000fe2000fffe03f */
/*0a80*/ BSSY B0, 0xb60 ; /* 0x000000d000007945 */
/* 0x000fea0003800000 */
/*0a90*/ SHF.R.S32.HI R12, RZ, UR4, R5 ; /* 0x00000004ff0c7c19 */
/* 0x000fc80008011405 */
/*0aa0*/ LOP3.LUT R11, R12, R7, RZ, 0xfc, !PT ; /* 0x000000070c0b7212 */
/* 0x001fc800078efcff */
/*0ab0*/ ISETP.LT.OR P0, PT, R11, RZ, P0 ; /* 0x000000ff0b00720c */
/* 0x000fc80000701670 */
/*0ac0*/ ISETP.GE.OR P0, PT, R12, c[0x0][0x178], P0 ; /* 0x00005e000c007a0c */
/* 0x000fda0000706670 */
/*0ad0*/ @P0 BRA 0xb50 ; /* 0x0000007000000947 */
/* 0x000fea0003800000 */
/*0ae0*/ IMAD R11, R7, c[0x0][0x178], R12 ; /* 0x00005e00070b7a24 */
/* 0x000fe200078e020c */
/*0af0*/ LDG.E R8, [R8.64+0x8] ; /* 0x0000080608087981 */
/* 0x000ea6000c1e1900 */
/*0b00*/ IMAD.WIDE R10, R11, R10, c[0x0][0x160] ; /* 0x000058000b0a7625 */
/* 0x000fe200078e020a */
/*0b10*/ LDG.E R7, [R2.64] ; /* 0x0000000602077981 */
/* 0x000eaa000c1e1900 */
/*0b20*/ LDG.E R11, [R10.64] ; /* 0x000000060a0b7981 */
/* 0x000ea4000c1e1900 */
/*0b30*/ FFMA R7, R8, R11, R7 ; /* 0x0000000b08077223 */
/* 0x004fca0000000007 */
/*0b40*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x0001e4000c101906 */
/*0b50*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0b60*/ @!P3 BRA 0x370 ; /* 0xfffff8000000b947 */
/* 0x000fea000383ffff */
/*0b70*/ IADD3 R4, R4, 0x1, RZ ; /* 0x0000000104047810 */
/* 0x000fc80007ffe0ff */
/*0b80*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x180], PT ; /* 0x0000600004007a0c */
/* 0x000fda0003f06270 */
/*0b90*/ @!P0 BRA 0x360 ; /* 0xfffff7c000008947 */
/* 0x000fea000383ffff */
/*0ba0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0bb0*/ BRA 0xbb0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0bc0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0bd0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0be0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0bf0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c00*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c10*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c20*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c30*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c40*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z4convPfS_S_iiii
.globl _Z4convPfS_S_iiii
.p2align 8
.type _Z4convPfS_S_iiii,@function
_Z4convPfS_S_iiii:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x34
s_load_b64 s[8:9], s[0:1], 0x18
s_load_b32 s3, s[0:1], 0x20
s_mov_b32 s10, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mul_i32 s2, s9, s8
s_cmp_gt_i32 s3, 0
s_mul_i32 s2, s2, s3
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_cmp_gt_i32_e32 vcc_lo, s2, v1
s_cselect_b32 s2, -1, 0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, vcc_lo, s2
s_and_saveexec_b32 s4, s2
s_cbranch_execz .LBB0_10
s_ashr_i32 s2, s8, 31
v_ashrrev_i32_e32 v3, 31, v1
s_add_i32 s4, s8, s2
s_clause 0x1
s_load_b32 s11, s[0:1], 0x24
s_load_b64 s[12:13], s[0:1], 0x10
s_xor_b32 s4, s4, s2
s_mov_b32 s14, 0
v_cvt_f32_u32_e32 v0, s4
s_sub_i32 s5, 0, s4
v_add_nc_u32_e32 v4, v1, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_rcp_iflag_f32_e32 v0, v0
v_xor_b32_e32 v4, v4, v3
v_xor_b32_e32 v3, s2, v3
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v0, 0x4f7ffffe, v0
s_waitcnt lgkmcnt(0)
s_cmp_gt_i32 s11, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_u32_f32_e32 v0, v0
v_mul_lo_u32 v2, s5, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v2, v0, v2
v_add_nc_u32_e32 v0, v0, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v0, v4, v0
v_mul_lo_u32 v2, v0, s4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_sub_nc_u32_e32 v2, v4, v2
v_add_nc_u32_e32 v4, 1, v0
v_subrev_nc_u32_e32 v5, s4, v2
v_cmp_le_u32_e32 vcc_lo, s4, v2
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_cndmask_b32_e32 v0, v0, v4, vcc_lo
v_cndmask_b32_e32 v2, v2, v5, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v4, 1, v0
v_cmp_le_u32_e32 vcc_lo, s4, v2
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v0, v0, v4, vcc_lo
v_xor_b32_e32 v0, v0, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
v_sub_nc_u32_e32 v4, v0, v3
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v0, v4, s8
v_sub_nc_u32_e32 v5, v1, v0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v0, vcc_lo, s12, v2
v_add_co_ci_u32_e32 v1, vcc_lo, s13, v3, vcc_lo
v_subrev_nc_u32_e32 v2, s11, v4
s_delay_alu instid0(VALU_DEP_4)
v_subrev_nc_u32_e32 v3, s11, v5
v_mov_b32_e32 v4, 0
s_cselect_b32 s12, -1, 0
s_mul_i32 s13, s11, s11
s_branch .LBB0_3
.LBB0_2:
s_add_i32 s14, s14, 1
s_add_i32 s10, s10, s13
s_cmp_eq_u32 s14, s3
s_cbranch_scc1 .LBB0_10
.LBB0_3:
s_and_not1_b32 vcc_lo, exec_lo, s12
s_cbranch_vccnz .LBB0_2
s_mov_b32 s15, 0
s_mov_b32 s16, s10
s_branch .LBB0_6
.LBB0_5:
s_set_inst_prefetch_distance 0x2
s_add_i32 s16, s16, s11
s_cmp_eq_u32 s15, s11
s_cbranch_scc1 .LBB0_2
.LBB0_6:
s_add_i32 s15, s15, 1
s_mov_b32 s18, 0
v_ashrrev_i32_e32 v6, s15, v2
s_delay_alu instid0(VALU_DEP_1)
v_mul_lo_u32 v5, v6, s8
v_cmp_lt_i32_e32 vcc_lo, -1, v6
v_cmp_gt_i32_e64 s0, s9, v6
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_8
.p2align 6
.LBB0_7:
s_or_b32 exec_lo, exec_lo, s2
s_cmp_eq_u32 s11, s17
s_mov_b32 s18, s17
s_cbranch_scc1 .LBB0_5
.LBB0_8:
s_add_i32 s17, s18, 1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v6, s17, v3
v_cmp_lt_i32_e64 s1, -1, v6
v_cmp_gt_i32_e64 s2, s8, v6
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s1, vcc_lo, s1
s_and_b32 s1, s0, s1
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
s_and_b32 s1, s1, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s2, s1
s_cbranch_execz .LBB0_7
v_add_nc_u32_e32 v6, v6, v5
s_add_i32 s18, s16, s18
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_ashr_i32 s19, s18, 31
s_lshl_b64 s[18:19], s[18:19], 2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v7, 31, v6
s_waitcnt lgkmcnt(0)
s_add_u32 s18, s6, s18
s_addc_u32 s19, s7, s19
v_lshlrev_b64 v[6:7], 2, v[6:7]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v6, s1, s4, v6
v_add_co_ci_u32_e64 v7, s1, s5, v7, s1
global_load_b32 v8, v4, s[18:19]
global_load_b32 v6, v[6:7], off
global_load_b32 v7, v[0:1], off
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v7, v6, v8
global_store_b32 v[0:1], v7, off
s_branch .LBB0_7
.LBB0_10:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z4convPfS_S_iiii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 20
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z4convPfS_S_iiii, .Lfunc_end0-_Z4convPfS_S_iiii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 36
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z4convPfS_S_iiii
.private_segment_fixed_size: 0
.sgpr_count: 22
.sgpr_spill_count: 0
.symbol: _Z4convPfS_S_iiii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0001e30a_00000000-6_main_8155.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "gpu num %d\n"
.LC1:
.string "max thread num : %d\n"
.LC2:
.string "grid dimensions : %d %d %d\n"
.text
.globl _Z12getThreadNumv
.type _Z12getThreadNumv, @function
_Z12getThreadNumv:
.LFB2057:
.cfi_startproc
endbr64
subq $1064, %rsp
.cfi_def_cfa_offset 1072
movq %fs:40, %rax
movq %rax, 1048(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rdi
call cudaGetDeviceCount@PLT
movl 12(%rsp), %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 16(%rsp), %rdi
movl $0, %esi
call cudaGetDeviceProperties_v2@PLT
movl 336(%rsp), %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 360(%rsp), %r8d
movl 356(%rsp), %ecx
movl 352(%rsp), %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 336(%rsp), %eax
movq 1048(%rsp), %rdx
subq %fs:40, %rdx
jne .L6
addq $1064, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L6:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z12getThreadNumv, .-_Z12getThreadNumv
.globl _Z31__device_stub__Z4convPfS_S_iiiiPfS_S_iiii
.type _Z31__device_stub__Z4convPfS_S_iiiiPfS_S_iiii, @function
_Z31__device_stub__Z4convPfS_S_iiiiPfS_S_iiii:
.LFB2083:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
leaq 192(%rsp), %rax
movq %rax, 160(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L11
.L7:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L12
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z4convPfS_S_iiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L7
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z31__device_stub__Z4convPfS_S_iiiiPfS_S_iiii, .-_Z31__device_stub__Z4convPfS_S_iiiiPfS_S_iiii
.globl _Z4convPfS_S_iiii
.type _Z4convPfS_S_iiii, @function
_Z4convPfS_S_iiii:
.LFB2084:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z31__device_stub__Z4convPfS_S_iiiiPfS_S_iiii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z4convPfS_S_iiii, .-_Z4convPfS_S_iiii
.section .rodata.str1.1
.LC5:
.string "%2.0f "
.LC6:
.string "\n"
.LC7:
.string "\n\n"
.text
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $88, %rsp
.cfi_def_cfa_offset 144
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $8294400, %edi
call _Znam@PLT
movq %rax, %r14
movq %rax, %r8
movl $1920, %edi
.L16:
leal -1920(%rdi), %edx
movq %r8, %rsi
.L17:
movl %edx, %ecx
sarl $31, %ecx
shrl $24, %ecx
leal (%rdx,%rcx), %eax
movzbl %al, %eax
subl %ecx, %eax
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, (%rsi)
addl $1, %edx
addq $4, %rsi
cmpl %edi, %edx
jne .L17
addq $7680, %r8
addl $1, %edi
cmpl $3000, %edi
jne .L16
movl $800, %edi
call _Znam@PLT
movq %rax, %rbp
movl $0, %edx
.L19:
movslq %edx, %rax
imulq $1717986919, %rax, %rax
sarq $33, %rax
movl %edx, %ecx
sarl $31, %ecx
subl %ecx, %eax
leal (%rax,%rax,4), %ecx
movl %edx, %eax
subl %ecx, %eax
subl $1, %eax
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, 0(%rbp,%rdx,4)
addq $1, %rdx
cmpq $200, %rdx
jne .L19
leaq 24(%rsp), %rdi
movl $8294400, %esi
call cudaMalloc@PLT
leaq 32(%rsp), %rdi
movl $800, %esi
call cudaMalloc@PLT
leaq 40(%rsp), %rdi
movl $66355200, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $8294400, %edx
movq %r14, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $800, %edx
movq %rbp, %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
call _Z12getThreadNumv
movl %eax, 60(%rsp)
movl $1, 64(%rsp)
pxor %xmm1, %xmm1
cvtsi2sdl %eax, %xmm1
movsd .LC3(%rip), %xmm0
divsd %xmm1, %xmm0
addsd .LC4(%rip), %xmm0
cvttsd2sil %xmm0, %eax
movl %eax, 48(%rsp)
movl $1, 52(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L41
.L20:
movl $66355200, %edi
call _Znam@PLT
movq %rax, 8(%rsp)
movl $2, %ecx
movl $66355200, %edx
movq 40(%rsp), %rsi
movq %rax, %rdi
call cudaMemcpy@PLT
leaq 40(%r14), %r12
addq $76840, %r14
leaq .LC5(%rip), %r13
leaq .LC6(%rip), %r15
.L21:
leaq -40(%r12), %rbx
.L22:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %r13, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %r12, %rbx
jne .L22
movq %r15, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $7680, %r12
cmpq %r14, %r12
jne .L21
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $20, %rbp
movl $25, %r14d
leaq .LC5(%rip), %r13
jmp .L24
.L41:
subq $8, %rsp
.cfi_def_cfa_offset 152
pushq $5
.cfi_def_cfa_offset 160
movl $8, %r9d
movl $1080, %r8d
movl $1920, %ecx
movq 56(%rsp), %rdx
movq 48(%rsp), %rsi
movq 40(%rsp), %rdi
call _Z31__device_stub__Z4convPfS_S_iiiiPfS_S_iiii
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L20
.L42:
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $5, %r15d
addq $20, %r12
cmpl %r14d, %r15d
je .L26
.L28:
leaq -20(%r12), %rbx
.L25:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %r13, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %r12, %rbx
jne .L25
jmp .L42
.L26:
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $25, %r14d
addq $100, %rbp
cmpl $75, %r14d
je .L43
.L24:
leal -25(%r14), %r15d
movq %rbp, %r12
jmp .L28
.L43:
movq 8(%rsp), %rax
addq $40, %rax
movq %rax, 8(%rsp)
movl $0, %r14d
leaq .LC5(%rip), %r12
jmp .L27
.L44:
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1920, %r13d
addq $7680, %rbp
cmpl %r15d, %r13d
je .L30
.L32:
leaq -40(%rbp), %rbx
.L29:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %r12, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %rbp, %rbx
jne .L29
jmp .L44
.L30:
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $2073600, %r14d
addq $8294400, 8(%rsp)
cmpl $4147200, %r14d
je .L31
.L27:
movq 8(%rsp), %rbp
movl %r14d, %r13d
leal 19200(%r14), %r15d
jmp .L32
.L31:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L45
movl $0, %eax
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L45:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size main, .-main
.section .rodata.str1.1
.LC8:
.string "_Z4convPfS_S_iiii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _Z4convPfS_S_iiii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC3:
.long -2147483648
.long 1094689791
.align 8
.LC4:
.long 0
.long 1072693248
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "main_8155.hip"
.globl _Z12getThreadNumv # -- Begin function _Z12getThreadNumv
.p2align 4, 0x90
.type _Z12getThreadNumv,@function
_Z12getThreadNumv: # @_Z12getThreadNumv
.cfi_startproc
# %bb.0:
subq $1480, %rsp # imm = 0x5C8
.cfi_def_cfa_offset 1488
leaq 4(%rsp), %rdi
callq hipGetDeviceCount
movl 4(%rsp), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
leaq 8(%rsp), %rdi
xorl %esi, %esi
callq hipGetDevicePropertiesR0600
movl 328(%rsp), %esi
movl $.L.str.1, %edi
xorl %eax, %eax
callq printf
movl 344(%rsp), %esi
movl 348(%rsp), %edx
movl 352(%rsp), %ecx
movl $.L.str.2, %edi
xorl %eax, %eax
callq printf
movl 328(%rsp), %eax
addq $1480, %rsp # imm = 0x5C8
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z12getThreadNumv, .Lfunc_end0-_Z12getThreadNumv
.cfi_endproc
# -- End function
.globl _Z19__device_stub__convPfS_S_iiii # -- Begin function _Z19__device_stub__convPfS_S_iiii
.p2align 4, 0x90
.type _Z19__device_stub__convPfS_S_iiii,@function
_Z19__device_stub__convPfS_S_iiii: # @_Z19__device_stub__convPfS_S_iiii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 20(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 160(%rsp), %rax
movq %rax, 144(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z4convPfS_S_iiii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end1:
.size _Z19__device_stub__convPfS_S_iiii, .Lfunc_end1-_Z19__device_stub__convPfS_S_iiii
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI2_0:
.quad 0x413fa3ff80000000 # double 2073599.5
.LCPI2_1:
.quad 0x3ff0000000000000 # double 1
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $1592, %rsp # imm = 0x638
.cfi_def_cfa_offset 1648
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $8294400, %edi # imm = 0x7E9000
callq _Znam
movq %rax, %r14
xorl %eax, %eax
movq %r14, %rcx
.p2align 4, 0x90
.LBB2_1: # %.preheader119
# =>This Loop Header: Depth=1
# Child Loop BB2_2 Depth 2
xorl %edx, %edx
.p2align 4, 0x90
.LBB2_2: # Parent Loop BB2_1 Depth=1
# => This Inner Loop Header: Depth=2
leal (%rax,%rdx), %esi
movzbl %sil, %esi
xorps %xmm0, %xmm0
cvtsi2ss %esi, %xmm0
movss %xmm0, (%rcx,%rdx,4)
incq %rdx
cmpq $1920, %rdx # imm = 0x780
jne .LBB2_2
# %bb.3: # in Loop: Header=BB2_1 Depth=1
incq %rax
addq $7680, %rcx # imm = 0x1E00
cmpq $1080, %rax # imm = 0x438
jne .LBB2_1
# %bb.4:
movl $800, %edi # imm = 0x320
callq _Znam
movq %rax, %rbx
movl $-1, %eax
xorl %ecx, %ecx
movl $3435973837, %edx # imm = 0xCCCCCCCD
.p2align 4, 0x90
.LBB2_5: # =>This Inner Loop Header: Depth=1
movl %ecx, %esi
imulq %rdx, %rsi
shrq $34, %rsi
leal (%rsi,%rsi,4), %esi
movl %eax, %edi
subl %esi, %edi
xorps %xmm0, %xmm0
cvtsi2ss %edi, %xmm0
movss %xmm0, (%rbx,%rcx,4)
incq %rcx
incl %eax
cmpq $200, %rcx
jne .LBB2_5
# %bb.6:
leaq 48(%rsp), %rdi
movl $8294400, %esi # imm = 0x7E9000
callq hipMalloc
leaq 40(%rsp), %rdi
movl $800, %esi # imm = 0x320
callq hipMalloc
leaq 32(%rsp), %rdi
movl $66355200, %esi # imm = 0x3F48000
callq hipMalloc
movq 48(%rsp), %rdi
movl $8294400, %edx # imm = 0x7E9000
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movq 40(%rsp), %rdi
movl $800, %edx # imm = 0x320
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
leaq 16(%rsp), %rdi
callq hipGetDeviceCount
movl 16(%rsp), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
leaq 112(%rsp), %rdi
xorl %esi, %esi
callq hipGetDevicePropertiesR0600
movl 432(%rsp), %esi
movl $.L.str.1, %edi
xorl %eax, %eax
callq printf
movl 448(%rsp), %esi
movl 452(%rsp), %edx
movl 456(%rsp), %ecx
movl $.L.str.2, %edi
xorl %eax, %eax
callq printf
movl 432(%rsp), %edx
xorps %xmm0, %xmm0
cvtsi2sd %edx, %xmm0
movsd .LCPI2_0(%rip), %xmm1 # xmm1 = mem[0],zero
divsd %xmm0, %xmm1
addsd .LCPI2_1(%rip), %xmm1
cvttsd2si %xmm1, %edi
movabsq $4294967296, %rax # imm = 0x100000000
orq %rax, %rdi
orq %rax, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_8
# %bb.7:
movq 48(%rsp), %rax
movq 40(%rsp), %rcx
movq 32(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl $1920, 12(%rsp) # imm = 0x780
movl $1080, 8(%rsp) # imm = 0x438
movl $8, 4(%rsp)
movl $5, (%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
leaq 4(%rsp), %rax
movq %rax, 152(%rsp)
movq %rsp, %rax
movq %rax, 160(%rsp)
leaq 16(%rsp), %rdi
leaq 72(%rsp), %rsi
leaq 64(%rsp), %rdx
leaq 56(%rsp), %rcx
callq __hipPopCallConfiguration
movq 16(%rsp), %rsi
movl 24(%rsp), %edx
movq 72(%rsp), %rcx
movl 80(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z4convPfS_S_iiii, %edi
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_8:
movl $66355200, %edi # imm = 0x3F48000
callq _Znam
movq %rax, %r15
movq 32(%rsp), %rsi
movl $66355200, %edx # imm = 0x3F48000
movq %rax, %rdi
movl $2, %ecx
callq hipMemcpy
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB2_9: # %.preheader118
# =>This Loop Header: Depth=1
# Child Loop BB2_10 Depth 2
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB2_10: # Parent Loop BB2_9 Depth=1
# => This Inner Loop Header: Depth=2
movss (%r14,%r13,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.3, %edi
movb $1, %al
callq printf
incq %r13
cmpq $10, %r13
jne .LBB2_10
# %bb.11: # in Loop: Header=BB2_9 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r12
addq $7680, %r14 # imm = 0x1E00
cmpq $10, %r12
jne .LBB2_9
# %bb.12:
movl $10, %edi
callq putchar@PLT
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB2_13: # %.preheader117
# =>This Loop Header: Depth=1
# Child Loop BB2_14 Depth 2
# Child Loop BB2_15 Depth 3
movq %rbx, %r12
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB2_14: # %.preheader116
# Parent Loop BB2_13 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB2_15 Depth 3
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB2_15: # Parent Loop BB2_13 Depth=1
# Parent Loop BB2_14 Depth=2
# => This Inner Loop Header: Depth=3
movss (%r12,%rbp,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.3, %edi
movb $1, %al
callq printf
incq %rbp
cmpq $5, %rbp
jne .LBB2_15
# %bb.16: # in Loop: Header=BB2_14 Depth=2
movl $10, %edi
callq putchar@PLT
incq %r13
addq $20, %r12
cmpq $5, %r13
jne .LBB2_14
# %bb.17: # in Loop: Header=BB2_13 Depth=1
movl $.Lstr.1, %edi
callq puts@PLT
leaq 1(%r14), %rax
addq $100, %rbx
testq %r14, %r14
movq %rax, %r14
je .LBB2_13
# %bb.18: # %.preheader114.preheader
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB2_19: # %.preheader114
# =>This Loop Header: Depth=1
# Child Loop BB2_20 Depth 2
# Child Loop BB2_21 Depth 3
movq %r15, %r14
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB2_20: # %.preheader
# Parent Loop BB2_19 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB2_21 Depth 3
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB2_21: # Parent Loop BB2_19 Depth=1
# Parent Loop BB2_20 Depth=2
# => This Inner Loop Header: Depth=3
movss (%r14,%r13,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.3, %edi
movb $1, %al
callq printf
incq %r13
cmpq $10, %r13
jne .LBB2_21
# %bb.22: # in Loop: Header=BB2_20 Depth=2
movl $10, %edi
callq putchar@PLT
incq %r12
addq $7680, %r14 # imm = 0x1E00
cmpq $10, %r12
jne .LBB2_20
# %bb.23: # in Loop: Header=BB2_19 Depth=1
movl $.Lstr.1, %edi
callq puts@PLT
leaq 1(%rbx), %rax
addq $8294400, %r15 # imm = 0x7E9000
testq %rbx, %rbx
movq %rax, %rbx
je .LBB2_19
# %bb.24:
xorl %eax, %eax
addq $1592, %rsp # imm = 0x638
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4convPfS_S_iiii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "gpu num %d\n"
.size .L.str, 12
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "max thread num : %d\n"
.size .L.str.1, 21
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "grid dimensions : %d %d %d\n"
.size .L.str.2, 28
.type _Z4convPfS_S_iiii,@object # @_Z4convPfS_S_iiii
.section .rodata,"a",@progbits
.globl _Z4convPfS_S_iiii
.p2align 3, 0x0
_Z4convPfS_S_iiii:
.quad _Z19__device_stub__convPfS_S_iiii
.size _Z4convPfS_S_iiii, 8
.type .L.str.3,@object # @.str.3
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.3:
.asciz "%2.0f "
.size .L.str.3, 7
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z4convPfS_S_iiii"
.size .L__unnamed_1, 18
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr.1,@object # @str.1
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr.1:
.asciz "\n"
.size .Lstr.1, 2
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z19__device_stub__convPfS_S_iiii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z4convPfS_S_iiii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void coalesced4(float *A, float *C, const int N)
{
int i = (blockIdx.x * blockDim.x + threadIdx.x)*4;
if (i+3 < N) { C[i] = A[i]; C[i+1] = A[i+1];
C[i+2] = A[i+2]; C[i+3] = A[i+3];}
} | code for sm_80
Function : _Z10coalesced4PfS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ SHF.L.U32 R0, R0, 0x2, RZ ; /* 0x0000000200007819 */
/* 0x000fc800000006ff */
/*0050*/ IADD3 R2, R0, 0x3, RZ ; /* 0x0000000300027810 */
/* 0x000fc80007ffe0ff */
/*0060*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x170], PT ; /* 0x00005c0002007a0c */
/* 0x000fda0003f06270 */
/*0070*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0080*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0090*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*00a0*/ IMAD.WIDE R2, R0, R5, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fca00078e0205 */
/*00b0*/ LDG.E R7, [R2.64] ; /* 0x0000000402077981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IMAD.WIDE R4, R0, R5, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fca00078e0205 */
/*00d0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x004fe8000c101904 */
/*00e0*/ LDG.E R9, [R2.64+0x4] ; /* 0x0000040402097981 */
/* 0x000ea8000c1e1900 */
/*00f0*/ STG.E [R4.64+0x4], R9 ; /* 0x0000040904007986 */
/* 0x004fe8000c101904 */
/*0100*/ LDG.E R11, [R2.64+0x8] ; /* 0x00000804020b7981 */
/* 0x000ea8000c1e1900 */
/*0110*/ STG.E [R4.64+0x8], R11 ; /* 0x0000080b04007986 */
/* 0x004fe8000c101904 */
/*0120*/ LDG.E R13, [R2.64+0xc] ; /* 0x00000c04020d7981 */
/* 0x000ea8000c1e1900 */
/*0130*/ STG.E [R4.64+0xc], R13 ; /* 0x00000c0d04007986 */
/* 0x004fe2000c101904 */
/*0140*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0150*/ BRA 0x150; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void coalesced4(float *A, float *C, const int N)
{
int i = (blockIdx.x * blockDim.x + threadIdx.x)*4;
if (i+3 < N) { C[i] = A[i]; C[i+1] = A[i+1];
C[i+2] = A[i+2]; C[i+3] = A[i+3];}
} | .file "tmpxft_001b79e6_00000000-6_coalesced4.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z33__device_stub__Z10coalesced4PfS_iPfS_i
.type _Z33__device_stub__Z10coalesced4PfS_iPfS_i, @function
_Z33__device_stub__Z10coalesced4PfS_iPfS_i:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z10coalesced4PfS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z33__device_stub__Z10coalesced4PfS_iPfS_i, .-_Z33__device_stub__Z10coalesced4PfS_iPfS_i
.globl _Z10coalesced4PfS_i
.type _Z10coalesced4PfS_i, @function
_Z10coalesced4PfS_i:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z33__device_stub__Z10coalesced4PfS_iPfS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z10coalesced4PfS_i, .-_Z10coalesced4PfS_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z10coalesced4PfS_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z10coalesced4PfS_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void coalesced4(float *A, float *C, const int N)
{
int i = (blockIdx.x * blockDim.x + threadIdx.x)*4;
if (i+3 < N) { C[i] = A[i]; C[i+1] = A[i+1];
C[i+2] = A[i+2]; C[i+3] = A[i+3];}
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void coalesced4(float *A, float *C, const int N)
{
int i = (blockIdx.x * blockDim.x + threadIdx.x)*4;
if (i+3 < N) { C[i] = A[i]; C[i+1] = A[i+1];
C[i+2] = A[i+2]; C[i+3] = A[i+3];}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void coalesced4(float *A, float *C, const int N)
{
int i = (blockIdx.x * blockDim.x + threadIdx.x)*4;
if (i+3 < N) { C[i] = A[i]; C[i+1] = A[i+1];
C[i+2] = A[i+2]; C[i+3] = A[i+3];}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z10coalesced4PfS_i
.globl _Z10coalesced4PfS_i
.p2align 8
.type _Z10coalesced4PfS_i,@function
_Z10coalesced4PfS_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
s_mul_i32 s15, s15, s2
s_mov_b32 s2, exec_lo
v_add_lshl_u32 v1, s15, v0, 2
v_or_b32_e32 v0, 3, v1
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s3, v0
s_cbranch_execz .LBB0_2
s_load_b128 s[0:3], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v4, vcc_lo, s0, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v3, vcc_lo
v_add_co_u32 v2, vcc_lo, s2, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo
global_load_b32 v8, v[4:5], off
v_or_b32_e32 v4, 1, v1
v_or_b32_e32 v1, 2, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v5, 31, v4
v_lshlrev_b64 v[4:5], 2, v[4:5]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v6, vcc_lo, s0, v4
v_add_co_ci_u32_e32 v7, vcc_lo, s1, v5, vcc_lo
v_add_co_u32 v4, vcc_lo, s2, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s3, v5, vcc_lo
s_waitcnt vmcnt(0)
global_store_b32 v[2:3], v8, off
global_load_b32 v8, v[6:7], off
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
v_ashrrev_i32_e32 v1, 31, v0
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v6, vcc_lo, s0, v2
v_add_co_ci_u32_e32 v7, vcc_lo, s1, v3, vcc_lo
v_add_co_u32 v2, vcc_lo, s2, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo
s_waitcnt vmcnt(0)
global_store_b32 v[4:5], v8, off
global_load_b32 v6, v[6:7], off
v_add_co_u32 v4, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
s_waitcnt vmcnt(0)
global_store_b32 v[2:3], v6, off
global_load_b32 v2, v[4:5], off
s_waitcnt vmcnt(0)
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10coalesced4PfS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z10coalesced4PfS_i, .Lfunc_end0-_Z10coalesced4PfS_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10coalesced4PfS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z10coalesced4PfS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void coalesced4(float *A, float *C, const int N)
{
int i = (blockIdx.x * blockDim.x + threadIdx.x)*4;
if (i+3 < N) { C[i] = A[i]; C[i+1] = A[i+1];
C[i+2] = A[i+2]; C[i+3] = A[i+3];}
} | .text
.file "coalesced4.hip"
.globl _Z25__device_stub__coalesced4PfS_i # -- Begin function _Z25__device_stub__coalesced4PfS_i
.p2align 4, 0x90
.type _Z25__device_stub__coalesced4PfS_i,@function
_Z25__device_stub__coalesced4PfS_i: # @_Z25__device_stub__coalesced4PfS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10coalesced4PfS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z25__device_stub__coalesced4PfS_i, .Lfunc_end0-_Z25__device_stub__coalesced4PfS_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10coalesced4PfS_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z10coalesced4PfS_i,@object # @_Z10coalesced4PfS_i
.section .rodata,"a",@progbits
.globl _Z10coalesced4PfS_i
.p2align 3, 0x0
_Z10coalesced4PfS_i:
.quad _Z25__device_stub__coalesced4PfS_i
.size _Z10coalesced4PfS_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z10coalesced4PfS_i"
.size .L__unnamed_1, 20
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__coalesced4PfS_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z10coalesced4PfS_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z10coalesced4PfS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ SHF.L.U32 R0, R0, 0x2, RZ ; /* 0x0000000200007819 */
/* 0x000fc800000006ff */
/*0050*/ IADD3 R2, R0, 0x3, RZ ; /* 0x0000000300027810 */
/* 0x000fc80007ffe0ff */
/*0060*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x170], PT ; /* 0x00005c0002007a0c */
/* 0x000fda0003f06270 */
/*0070*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0080*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0090*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*00a0*/ IMAD.WIDE R2, R0, R5, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fca00078e0205 */
/*00b0*/ LDG.E R7, [R2.64] ; /* 0x0000000402077981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IMAD.WIDE R4, R0, R5, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fca00078e0205 */
/*00d0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x004fe8000c101904 */
/*00e0*/ LDG.E R9, [R2.64+0x4] ; /* 0x0000040402097981 */
/* 0x000ea8000c1e1900 */
/*00f0*/ STG.E [R4.64+0x4], R9 ; /* 0x0000040904007986 */
/* 0x004fe8000c101904 */
/*0100*/ LDG.E R11, [R2.64+0x8] ; /* 0x00000804020b7981 */
/* 0x000ea8000c1e1900 */
/*0110*/ STG.E [R4.64+0x8], R11 ; /* 0x0000080b04007986 */
/* 0x004fe8000c101904 */
/*0120*/ LDG.E R13, [R2.64+0xc] ; /* 0x00000c04020d7981 */
/* 0x000ea8000c1e1900 */
/*0130*/ STG.E [R4.64+0xc], R13 ; /* 0x00000c0d04007986 */
/* 0x004fe2000c101904 */
/*0140*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0150*/ BRA 0x150; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z10coalesced4PfS_i
.globl _Z10coalesced4PfS_i
.p2align 8
.type _Z10coalesced4PfS_i,@function
_Z10coalesced4PfS_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
s_mul_i32 s15, s15, s2
s_mov_b32 s2, exec_lo
v_add_lshl_u32 v1, s15, v0, 2
v_or_b32_e32 v0, 3, v1
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s3, v0
s_cbranch_execz .LBB0_2
s_load_b128 s[0:3], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v4, vcc_lo, s0, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v3, vcc_lo
v_add_co_u32 v2, vcc_lo, s2, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo
global_load_b32 v8, v[4:5], off
v_or_b32_e32 v4, 1, v1
v_or_b32_e32 v1, 2, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v5, 31, v4
v_lshlrev_b64 v[4:5], 2, v[4:5]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v6, vcc_lo, s0, v4
v_add_co_ci_u32_e32 v7, vcc_lo, s1, v5, vcc_lo
v_add_co_u32 v4, vcc_lo, s2, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s3, v5, vcc_lo
s_waitcnt vmcnt(0)
global_store_b32 v[2:3], v8, off
global_load_b32 v8, v[6:7], off
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
v_ashrrev_i32_e32 v1, 31, v0
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v6, vcc_lo, s0, v2
v_add_co_ci_u32_e32 v7, vcc_lo, s1, v3, vcc_lo
v_add_co_u32 v2, vcc_lo, s2, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo
s_waitcnt vmcnt(0)
global_store_b32 v[4:5], v8, off
global_load_b32 v6, v[6:7], off
v_add_co_u32 v4, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
s_waitcnt vmcnt(0)
global_store_b32 v[2:3], v6, off
global_load_b32 v2, v[4:5], off
s_waitcnt vmcnt(0)
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10coalesced4PfS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z10coalesced4PfS_i, .Lfunc_end0-_Z10coalesced4PfS_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10coalesced4PfS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z10coalesced4PfS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_001b79e6_00000000-6_coalesced4.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z33__device_stub__Z10coalesced4PfS_iPfS_i
.type _Z33__device_stub__Z10coalesced4PfS_iPfS_i, @function
_Z33__device_stub__Z10coalesced4PfS_iPfS_i:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z10coalesced4PfS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z33__device_stub__Z10coalesced4PfS_iPfS_i, .-_Z33__device_stub__Z10coalesced4PfS_iPfS_i
.globl _Z10coalesced4PfS_i
.type _Z10coalesced4PfS_i, @function
_Z10coalesced4PfS_i:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z33__device_stub__Z10coalesced4PfS_iPfS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z10coalesced4PfS_i, .-_Z10coalesced4PfS_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z10coalesced4PfS_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z10coalesced4PfS_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "coalesced4.hip"
.globl _Z25__device_stub__coalesced4PfS_i # -- Begin function _Z25__device_stub__coalesced4PfS_i
.p2align 4, 0x90
.type _Z25__device_stub__coalesced4PfS_i,@function
_Z25__device_stub__coalesced4PfS_i: # @_Z25__device_stub__coalesced4PfS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10coalesced4PfS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z25__device_stub__coalesced4PfS_i, .Lfunc_end0-_Z25__device_stub__coalesced4PfS_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10coalesced4PfS_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z10coalesced4PfS_i,@object # @_Z10coalesced4PfS_i
.section .rodata,"a",@progbits
.globl _Z10coalesced4PfS_i
.p2align 3, 0x0
_Z10coalesced4PfS_i:
.quad _Z25__device_stub__coalesced4PfS_i
.size _Z10coalesced4PfS_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z10coalesced4PfS_i"
.size .L__unnamed_1, 20
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__coalesced4PfS_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z10coalesced4PfS_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void EmptyKernel() {
//extern __shared__ thrust::complex<float> filter_products[];
} | code for sm_80
Function : _Z11EmptyKernelv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void EmptyKernel() {
//extern __shared__ thrust::complex<float> filter_products[];
} | .file "tmpxft_000eac8e_00000000-6_EmptyKernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z30__device_stub__Z11EmptyKernelvv
.type _Z30__device_stub__Z11EmptyKernelvv, @function
_Z30__device_stub__Z11EmptyKernelvv:
.LFB2051:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z11EmptyKernelv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z30__device_stub__Z11EmptyKernelvv, .-_Z30__device_stub__Z11EmptyKernelvv
.globl _Z11EmptyKernelv
.type _Z11EmptyKernelv, @function
_Z11EmptyKernelv:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z30__device_stub__Z11EmptyKernelvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z11EmptyKernelv, .-_Z11EmptyKernelv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z11EmptyKernelv"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z11EmptyKernelv(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void EmptyKernel() {
//extern __shared__ thrust::complex<float> filter_products[];
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void EmptyKernel() {
//extern __shared__ thrust::complex<float> filter_products[];
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void EmptyKernel() {
//extern __shared__ thrust::complex<float> filter_products[];
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z11EmptyKernelv
.globl _Z11EmptyKernelv
.p2align 8
.type _Z11EmptyKernelv,@function
_Z11EmptyKernelv:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z11EmptyKernelv
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 0
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 0
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z11EmptyKernelv, .Lfunc_end0-_Z11EmptyKernelv
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args: []
.group_segment_fixed_size: 0
.kernarg_segment_align: 4
.kernarg_segment_size: 0
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z11EmptyKernelv
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z11EmptyKernelv.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void EmptyKernel() {
//extern __shared__ thrust::complex<float> filter_products[];
} | .text
.file "EmptyKernel.hip"
.globl _Z26__device_stub__EmptyKernelv # -- Begin function _Z26__device_stub__EmptyKernelv
.p2align 4, 0x90
.type _Z26__device_stub__EmptyKernelv,@function
_Z26__device_stub__EmptyKernelv: # @_Z26__device_stub__EmptyKernelv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z11EmptyKernelv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end0:
.size _Z26__device_stub__EmptyKernelv, .Lfunc_end0-_Z26__device_stub__EmptyKernelv
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11EmptyKernelv, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z11EmptyKernelv,@object # @_Z11EmptyKernelv
.section .rodata,"a",@progbits
.globl _Z11EmptyKernelv
.p2align 3, 0x0
_Z11EmptyKernelv:
.quad _Z26__device_stub__EmptyKernelv
.size _Z11EmptyKernelv, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z11EmptyKernelv"
.size .L__unnamed_1, 17
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z26__device_stub__EmptyKernelv
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z11EmptyKernelv
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z11EmptyKernelv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z11EmptyKernelv
.globl _Z11EmptyKernelv
.p2align 8
.type _Z11EmptyKernelv,@function
_Z11EmptyKernelv:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z11EmptyKernelv
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 0
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 0
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z11EmptyKernelv, .Lfunc_end0-_Z11EmptyKernelv
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args: []
.group_segment_fixed_size: 0
.kernarg_segment_align: 4
.kernarg_segment_size: 0
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z11EmptyKernelv
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z11EmptyKernelv.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000eac8e_00000000-6_EmptyKernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z30__device_stub__Z11EmptyKernelvv
.type _Z30__device_stub__Z11EmptyKernelvv, @function
_Z30__device_stub__Z11EmptyKernelvv:
.LFB2051:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z11EmptyKernelv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z30__device_stub__Z11EmptyKernelvv, .-_Z30__device_stub__Z11EmptyKernelvv
.globl _Z11EmptyKernelv
.type _Z11EmptyKernelv, @function
_Z11EmptyKernelv:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z30__device_stub__Z11EmptyKernelvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z11EmptyKernelv, .-_Z11EmptyKernelv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z11EmptyKernelv"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z11EmptyKernelv(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "EmptyKernel.hip"
.globl _Z26__device_stub__EmptyKernelv # -- Begin function _Z26__device_stub__EmptyKernelv
.p2align 4, 0x90
.type _Z26__device_stub__EmptyKernelv,@function
_Z26__device_stub__EmptyKernelv: # @_Z26__device_stub__EmptyKernelv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z11EmptyKernelv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end0:
.size _Z26__device_stub__EmptyKernelv, .Lfunc_end0-_Z26__device_stub__EmptyKernelv
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11EmptyKernelv, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z11EmptyKernelv,@object # @_Z11EmptyKernelv
.section .rodata,"a",@progbits
.globl _Z11EmptyKernelv
.p2align 3, 0x0
_Z11EmptyKernelv:
.quad _Z26__device_stub__EmptyKernelv
.size _Z11EmptyKernelv, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z11EmptyKernelv"
.size .L__unnamed_1, 17
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z26__device_stub__EmptyKernelv
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z11EmptyKernelv
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include <cuda.h>
__global__ void kernel()
{
printf("hello cuda\n");
__syncthreads();
}
int main()
{
kernel<<<1, 1>>>();
cudaDeviceSynchronize();
int i = 0, a = 0;
for(i = 0; i < 10000000; i ++)
a ++;
printf("a = %d\n", a);
return 0;
} | code for sm_80
Function : _Z6kernelv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x000fe20000000f00 */
/*0020*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff047624 */
/* 0x000fe200078e00ff */
/*0030*/ CS2R R6, SRZ ; /* 0x0000000000067805 */
/* 0x000fe2000001ff00 */
/*0040*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0xc] ; /* 0x01000300ff057624 */
/* 0x000fe200078e00ff */
/*0050*/ LDC.64 R2, c[0x4][R0] ; /* 0x0100000000027b82 */
/* 0x00006c0000000a00 */
/*0060*/ LEPC R8 ; /* 0x000000000008734e */
/* 0x000fe40000000000 */
/*0070*/ MOV R11, 0xe0 ; /* 0x000000e0000b7802 */
/* 0x000fe40000000f00 */
/*0080*/ MOV R20, 0x60 ; /* 0x0000006000147802 */
/* 0x000fe40000000f00 */
/*0090*/ MOV R21, 0x0 ; /* 0x0000000000157802 */
/* 0x000fe40000000f00 */
/*00a0*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x001fc40000000f00 */
/*00b0*/ IADD3 R20, P0, P1, -R20, R11, R8 ; /* 0x0000000b14147210 */
/* 0x000fc8000791e108 */
/*00c0*/ IADD3.X R21, ~R0, R21, R9, P0, P1 ; /* 0x0000001500157210 */
/* 0x000fc800007e2509 */
/*00d0*/ CALL.ABS.NOINC R2 ; /* 0x0000000002007343 */
/* 0x002fea0003c00000 */
/*00e0*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */
/* 0x000fe20003800000 */
/*00f0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0100*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0110*/ BRA 0x110; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <cuda.h>
__global__ void kernel()
{
printf("hello cuda\n");
__syncthreads();
}
int main()
{
kernel<<<1, 1>>>();
cudaDeviceSynchronize();
int i = 0, a = 0;
for(i = 0; i < 10000000; i ++)
a ++;
printf("a = %d\n", a);
return 0;
} | .file "tmpxft_000bd22a_00000000-6_hello_cuda.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z24__device_stub__Z6kernelvv
.type _Z24__device_stub__Z6kernelvv, @function
_Z24__device_stub__Z6kernelvv:
.LFB2082:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z6kernelv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z24__device_stub__Z6kernelvv, .-_Z24__device_stub__Z6kernelvv
.globl _Z6kernelv
.type _Z6kernelv, @function
_Z6kernelv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z24__device_stub__Z6kernelvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z6kernelv, .-_Z6kernelv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "a = %d\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L16
.L12:
call cudaDeviceSynchronize@PLT
movl $10000000, %eax
.L13:
subl $1, %eax
jne .L13
movl $10000000, %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %eax
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L16:
.cfi_restore_state
call _Z24__device_stub__Z6kernelvv
jmp .L12
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC1:
.string "_Z6kernelv"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z6kernelv(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <cuda.h>
__global__ void kernel()
{
printf("hello cuda\n");
__syncthreads();
}
int main()
{
kernel<<<1, 1>>>();
cudaDeviceSynchronize();
int i = 0, a = 0;
for(i = 0; i < 10000000; i ++)
a ++;
printf("a = %d\n", a);
return 0;
} | #include <stdio.h>
#include <hip/hip_runtime.h>
__global__ void kernel()
{
printf("hello cuda\n");
__syncthreads();
}
int main()
{
kernel<<<1, 1>>>();
hipDeviceSynchronize();
int i = 0, a = 0;
for(i = 0; i < 10000000; i ++)
a ++;
printf("a = %d\n", a);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <stdio.h>
#include <hip/hip_runtime.h>
__global__ void kernel()
{
printf("hello cuda\n");
__syncthreads();
}
int main()
{
kernel<<<1, 1>>>();
hipDeviceSynchronize();
int i = 0, a = 0;
for(i = 0; i < 10000000; i ++)
a ++;
printf("a = %d\n", a);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6kernelv
.globl _Z6kernelv
.p2align 8
.type _Z6kernelv,@function
_Z6kernelv:
s_load_b64 s[2:3], s[0:1], 0x50
v_mbcnt_lo_u32_b32 v20, -1, 0
v_mov_b32_e32 v6, 0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_dual_mov_b32 v7, 0 :: v_dual_mov_b32 v4, v20
v_readfirstlane_b32 s0, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_eq_u32_e64 s0, s0, v4
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_6
v_mov_b32_e32 v0, 0
s_mov_b32 s4, exec_lo
s_waitcnt lgkmcnt(0)
global_load_b64 v[8:9], v0, s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
s_clause 0x1
global_load_b64 v[1:2], v0, s[2:3] offset:40
global_load_b64 v[5:6], v0, s[2:3]
s_waitcnt vmcnt(1)
v_and_b32_e32 v1, v1, v8
v_and_b32_e32 v2, v2, v9
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_hi_u32 v3, v1, 24
v_mul_lo_u32 v2, v2, 24
v_mul_lo_u32 v1, v1, 24
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v2, v3, v2
s_waitcnt vmcnt(0)
v_add_co_u32 v1, vcc_lo, v5, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, v6, v2, vcc_lo
global_load_b64 v[6:7], v[1:2], off glc
s_waitcnt vmcnt(0)
global_atomic_cmpswap_b64 v[6:7], v0, v[6:9], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_cmpx_ne_u64_e64 v[6:7], v[8:9]
s_cbranch_execz .LBB0_5
s_mov_b32 s5, 0
.p2align 6
.LBB0_3:
s_sleep 1
s_clause 0x1
global_load_b64 v[1:2], v0, s[2:3] offset:40
global_load_b64 v[10:11], v0, s[2:3]
v_dual_mov_b32 v9, v7 :: v_dual_mov_b32 v8, v6
s_waitcnt vmcnt(1)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_and_b32_e32 v1, v1, v8
v_and_b32_e32 v7, v2, v9
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[5:6], null, v1, 24, v[10:11]
v_mov_b32_e32 v1, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[2:3], null, v7, 24, v[1:2]
v_mov_b32_e32 v6, v2
global_load_b64 v[6:7], v[5:6], off glc
s_waitcnt vmcnt(0)
global_atomic_cmpswap_b64 v[6:7], v0, v[6:9], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_cmp_eq_u64_e32 vcc_lo, v[6:7], v[8:9]
s_or_b32 s5, vcc_lo, s5
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s5
s_cbranch_execnz .LBB0_3
s_or_b32 exec_lo, exec_lo, s5
.LBB0_5:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s4
.LBB0_6:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s1
v_mov_b32_e32 v5, 0
v_readfirstlane_b32 s4, v6
v_readfirstlane_b32 s5, v7
s_mov_b32 s8, exec_lo
s_waitcnt lgkmcnt(0)
s_clause 0x1
global_load_b64 v[8:9], v5, s[2:3] offset:40
global_load_b128 v[0:3], v5, s[2:3]
s_waitcnt vmcnt(1)
v_readfirstlane_b32 s6, v8
v_readfirstlane_b32 s7, v9
s_delay_alu instid0(VALU_DEP_1)
s_and_b64 s[6:7], s[4:5], s[6:7]
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_8
v_dual_mov_b32 v6, s8 :: v_dual_mov_b32 v7, 0
s_mul_i32 s8, s7, 24
s_mul_hi_u32 s9, s6, 24
v_dual_mov_b32 v8, 2 :: v_dual_mov_b32 v9, 1
s_add_i32 s9, s9, s8
s_mul_i32 s8, s6, 24
s_waitcnt vmcnt(0)
v_add_co_u32 v10, vcc_lo, v0, s8
v_add_co_ci_u32_e32 v11, vcc_lo, s9, v1, vcc_lo
global_store_b128 v[10:11], v[6:9], off offset:8
.LBB0_8:
s_or_b32 exec_lo, exec_lo, s1
s_lshl_b64 s[8:9], s[6:7], 12
v_lshlrev_b64 v[4:5], 6, v[4:5]
s_waitcnt vmcnt(0)
v_add_co_u32 v2, vcc_lo, v2, s8
v_add_co_ci_u32_e32 v7, vcc_lo, s9, v3, vcc_lo
v_mov_b32_e32 v3, 0
s_mov_b32 s8, 0
s_delay_alu instid0(VALU_DEP_3)
v_add_co_u32 v6, vcc_lo, v2, v4
v_mov_b32_e32 v2, 33
s_mov_b32 s9, s8
s_mov_b32 s10, s8
s_mov_b32 s11, s8
v_add_co_ci_u32_e32 v7, vcc_lo, v7, v5, vcc_lo
v_mov_b32_e32 v4, v3
v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v8, s8
v_dual_mov_b32 v9, s9 :: v_dual_mov_b32 v10, s10
v_mov_b32_e32 v11, s11
s_clause 0x3
global_store_b128 v[6:7], v[2:5], off
global_store_b128 v[6:7], v[8:11], off offset:16
global_store_b128 v[6:7], v[8:11], off offset:32
global_store_b128 v[6:7], v[8:11], off offset:48
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_16
v_dual_mov_b32 v10, 0 :: v_dual_mov_b32 v11, s4
v_mov_b32_e32 v12, s5
s_clause 0x1
global_load_b64 v[13:14], v10, s[2:3] offset:32 glc
global_load_b64 v[2:3], v10, s[2:3] offset:40
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s8, v2
v_readfirstlane_b32 s9, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b64 s[8:9], s[8:9], s[4:5]
s_mul_i32 s9, s9, 24
s_mul_hi_u32 s10, s8, 24
s_mul_i32 s8, s8, 24
s_add_i32 s10, s10, s9
v_add_co_u32 v8, vcc_lo, v0, s8
v_add_co_ci_u32_e32 v9, vcc_lo, s10, v1, vcc_lo
s_mov_b32 s8, exec_lo
global_store_b64 v[8:9], v[13:14], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[4:5], v10, v[11:14], s[2:3] offset:32 glc
s_waitcnt vmcnt(0)
v_cmpx_ne_u64_e64 v[4:5], v[13:14]
s_cbranch_execz .LBB0_12
s_mov_b32 s9, 0
.LBB0_11:
v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
s_sleep 1
global_store_b64 v[8:9], v[4:5], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[2:3], v10, v[2:5], s[2:3] offset:32 glc
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
s_or_b32 s9, vcc_lo, s9
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s9
s_cbranch_execnz .LBB0_11
.LBB0_12:
s_or_b32 exec_lo, exec_lo, s8
v_mov_b32_e32 v2, 0
s_mov_b32 s9, exec_lo
s_mov_b32 s8, exec_lo
v_mbcnt_lo_u32_b32 v4, s9, 0
global_load_b64 v[2:3], v2, s[2:3] offset:16
v_cmpx_eq_u32_e32 0, v4
s_cbranch_execz .LBB0_14
s_bcnt1_i32_b32 s9, s9
s_delay_alu instid0(SALU_CYCLE_1)
v_dual_mov_b32 v5, 0 :: v_dual_mov_b32 v4, s9
s_waitcnt vmcnt(0)
global_atomic_add_u64 v[2:3], v[4:5], off offset:8
.LBB0_14:
s_or_b32 exec_lo, exec_lo, s8
s_waitcnt vmcnt(0)
global_load_b64 v[4:5], v[2:3], off offset:16
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, 0, v[4:5]
s_cbranch_vccnz .LBB0_16
global_load_b32 v2, v[2:3], off offset:24
v_mov_b32_e32 v3, 0
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s8, v2
s_waitcnt_vscnt null, 0x0
global_store_b64 v[4:5], v[2:3], off
s_and_b32 m0, s8, 0xff
s_sendmsg sendmsg(MSG_INTERRUPT)
.LBB0_16:
s_or_b32 exec_lo, exec_lo, s1
s_mul_i32 s1, s7, 24
s_mul_hi_u32 s7, s6, 24
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
s_add_i32 s7, s7, s1
s_mul_i32 s1, s6, 24
v_add_co_u32 v0, vcc_lo, v0, s1
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, v0, 20
v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
s_branch .LBB0_20
.p2align 6
.LBB0_17:
s_or_b32 exec_lo, exec_lo, s1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_readfirstlane_b32 s1, v2
s_cmp_eq_u32 s1, 0
s_cbranch_scc1 .LBB0_19
s_sleep 1
s_cbranch_execnz .LBB0_20
s_branch .LBB0_22
.p2align 6
.LBB0_19:
s_branch .LBB0_22
.LBB0_20:
v_mov_b32_e32 v2, 1
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_17
global_load_b32 v2, v[0:1], off glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_and_b32_e32 v2, 1, v2
s_branch .LBB0_17
.LBB0_22:
global_load_b64 v[22:23], v[6:7], off
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_26
v_mov_b32_e32 v6, 0
s_clause 0x2
global_load_b64 v[2:3], v6, s[2:3] offset:40
global_load_b64 v[7:8], v6, s[2:3] offset:24 glc
global_load_b64 v[4:5], v6, s[2:3]
s_waitcnt vmcnt(2)
v_add_co_u32 v9, vcc_lo, v2, 1
v_add_co_ci_u32_e32 v10, vcc_lo, 0, v3, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, v9, s4
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v10, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
v_dual_cndmask_b32 v1, v1, v10 :: v_dual_cndmask_b32 v0, v0, v9
v_and_b32_e32 v3, v1, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_and_b32_e32 v2, v0, v2
v_mul_lo_u32 v3, v3, 24
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mul_hi_u32 v9, v2, 24
v_mul_lo_u32 v2, v2, 24
v_add_nc_u32_e32 v3, v9, v3
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_add_co_u32 v4, vcc_lo, v4, v2
v_mov_b32_e32 v2, v7
v_add_co_ci_u32_e32 v5, vcc_lo, v5, v3, vcc_lo
v_mov_b32_e32 v3, v8
global_store_b64 v[4:5], v[7:8], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[2:3], v6, v[0:3], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[7:8]
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_26
s_mov_b32 s0, 0
.LBB0_25:
s_sleep 1
global_store_b64 v[4:5], v[2:3], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[7:8], v6, v[0:3], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, v[7:8], v[2:3]
v_dual_mov_b32 v2, v7 :: v_dual_mov_b32 v3, v8
s_or_b32 s0, vcc_lo, s0
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s0
s_cbranch_execnz .LBB0_25
.LBB0_26:
s_or_b32 exec_lo, exec_lo, s1
s_getpc_b64 s[4:5]
s_add_u32 s4, s4, .str@rel32@lo+4
s_addc_u32 s5, s5, .str@rel32@hi+12
s_mov_b32 s0, -1
s_cmp_lg_u64 s[4:5], 0
s_cbranch_scc0 .LBB0_105
s_waitcnt vmcnt(0)
v_dual_mov_b32 v1, v23 :: v_dual_and_b32 v0, -3, v22
v_mov_b32_e32 v25, 0
s_mov_b64 s[6:7], 12
s_branch .LBB0_29
.LBB0_28:
s_or_b32 exec_lo, exec_lo, s1
s_sub_u32 s6, s6, s8
s_subb_u32 s7, s7, s9
s_add_u32 s4, s4, s8
s_addc_u32 s5, s5, s9
s_cmp_lg_u64 s[6:7], 0
s_cbranch_scc0 .LBB0_104
.LBB0_29:
v_cmp_lt_u64_e64 s0, s[6:7], 56
s_delay_alu instid0(VALU_DEP_1)
s_and_b32 s0, s0, exec_lo
s_cselect_b32 s8, s6, 56
s_cselect_b32 s9, s7, 0
s_cmp_gt_u32 s8, 7
s_mov_b32 s0, -1
s_cbranch_scc1 .LBB0_34
v_mov_b32_e32 v2, 0
v_mov_b32_e32 v3, 0
s_cmp_eq_u32 s8, 0
s_cbranch_scc1 .LBB0_33
s_lshl_b64 s[0:1], s[8:9], 3
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], s[4:5]
.LBB0_32:
global_load_u8 v4, v25, s[12:13]
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v4
s_delay_alu instid0(VALU_DEP_1)
v_lshlrev_b64 v[4:5], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_add_u32 s12, s12, 1
s_addc_u32 s13, s13, 0
s_cmp_lg_u32 s0, s10
v_or_b32_e32 v2, v4, v2
v_or_b32_e32 v3, v5, v3
s_cbranch_scc1 .LBB0_32
.LBB0_33:
s_mov_b32 s0, 0
s_mov_b32 s15, 0
.LBB0_34:
s_and_not1_b32 vcc_lo, exec_lo, s0
s_mov_b64 s[0:1], s[4:5]
s_cbranch_vccnz .LBB0_36
global_load_b64 v[2:3], v25, s[4:5]
s_add_i32 s15, s8, -8
s_add_u32 s0, s4, 8
s_addc_u32 s1, s5, 0
.LBB0_36:
s_cmp_gt_u32 s15, 7
s_cbranch_scc1 .LBB0_41
v_mov_b32_e32 v4, 0
v_mov_b32_e32 v5, 0
s_cmp_eq_u32 s15, 0
s_cbranch_scc1 .LBB0_40
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], 0
.LBB0_39:
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s16, s0, s12
s_addc_u32 s17, s1, s13
s_add_u32 s12, s12, 1
global_load_u8 v6, v25, s[16:17]
s_addc_u32 s13, s13, 0
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[6:7], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_cmp_lg_u32 s15, s12
v_or_b32_e32 v4, v6, v4
s_delay_alu instid0(VALU_DEP_2)
v_or_b32_e32 v5, v7, v5
s_cbranch_scc1 .LBB0_39
.LBB0_40:
s_mov_b32 s14, 0
s_cbranch_execz .LBB0_42
s_branch .LBB0_43
.LBB0_41:
.LBB0_42:
global_load_b64 v[4:5], v25, s[0:1]
s_add_i32 s14, s15, -8
s_add_u32 s0, s0, 8
s_addc_u32 s1, s1, 0
.LBB0_43:
s_cmp_gt_u32 s14, 7
s_cbranch_scc1 .LBB0_48
v_mov_b32_e32 v6, 0
v_mov_b32_e32 v7, 0
s_cmp_eq_u32 s14, 0
s_cbranch_scc1 .LBB0_47
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], 0
.LBB0_46:
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s16, s0, s12
s_addc_u32 s17, s1, s13
s_add_u32 s12, s12, 1
global_load_u8 v8, v25, s[16:17]
s_addc_u32 s13, s13, 0
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v8
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[8:9], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_cmp_lg_u32 s14, s12
v_or_b32_e32 v6, v8, v6
s_delay_alu instid0(VALU_DEP_2)
v_or_b32_e32 v7, v9, v7
s_cbranch_scc1 .LBB0_46
.LBB0_47:
s_mov_b32 s15, 0
s_cbranch_execz .LBB0_49
s_branch .LBB0_50
.LBB0_48:
.LBB0_49:
global_load_b64 v[6:7], v25, s[0:1]
s_add_i32 s15, s14, -8
s_add_u32 s0, s0, 8
s_addc_u32 s1, s1, 0
.LBB0_50:
s_cmp_gt_u32 s15, 7
s_cbranch_scc1 .LBB0_55
v_mov_b32_e32 v8, 0
v_mov_b32_e32 v9, 0
s_cmp_eq_u32 s15, 0
s_cbranch_scc1 .LBB0_54
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], 0
.LBB0_53:
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s16, s0, s12
s_addc_u32 s17, s1, s13
s_add_u32 s12, s12, 1
global_load_u8 v10, v25, s[16:17]
s_addc_u32 s13, s13, 0
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[10:11], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_cmp_lg_u32 s15, s12
v_or_b32_e32 v8, v10, v8
s_delay_alu instid0(VALU_DEP_2)
v_or_b32_e32 v9, v11, v9
s_cbranch_scc1 .LBB0_53
.LBB0_54:
s_mov_b32 s14, 0
s_cbranch_execz .LBB0_56
s_branch .LBB0_57
.LBB0_55:
.LBB0_56:
global_load_b64 v[8:9], v25, s[0:1]
s_add_i32 s14, s15, -8
s_add_u32 s0, s0, 8
s_addc_u32 s1, s1, 0
.LBB0_57:
s_cmp_gt_u32 s14, 7
s_cbranch_scc1 .LBB0_62
v_mov_b32_e32 v10, 0
v_mov_b32_e32 v11, 0
s_cmp_eq_u32 s14, 0
s_cbranch_scc1 .LBB0_61
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], 0
.LBB0_60:
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s16, s0, s12
s_addc_u32 s17, s1, s13
s_add_u32 s12, s12, 1
global_load_u8 v12, v25, s[16:17]
s_addc_u32 s13, s13, 0
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v12
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[12:13], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_cmp_lg_u32 s14, s12
v_or_b32_e32 v10, v12, v10
s_delay_alu instid0(VALU_DEP_2)
v_or_b32_e32 v11, v13, v11
s_cbranch_scc1 .LBB0_60
.LBB0_61:
s_mov_b32 s15, 0
s_cbranch_execz .LBB0_63
s_branch .LBB0_64
.LBB0_62:
.LBB0_63:
global_load_b64 v[10:11], v25, s[0:1]
s_add_i32 s15, s14, -8
s_add_u32 s0, s0, 8
s_addc_u32 s1, s1, 0
.LBB0_64:
s_cmp_gt_u32 s15, 7
s_cbranch_scc1 .LBB0_69
v_mov_b32_e32 v12, 0
v_mov_b32_e32 v13, 0
s_cmp_eq_u32 s15, 0
s_cbranch_scc1 .LBB0_68
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], 0
.LBB0_67:
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s16, s0, s12
s_addc_u32 s17, s1, s13
s_add_u32 s12, s12, 1
global_load_u8 v14, v25, s[16:17]
s_addc_u32 s13, s13, 0
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v14
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[14:15], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_cmp_lg_u32 s15, s12
v_or_b32_e32 v12, v14, v12
s_delay_alu instid0(VALU_DEP_2)
v_or_b32_e32 v13, v15, v13
s_cbranch_scc1 .LBB0_67
.LBB0_68:
s_mov_b32 s14, 0
s_cbranch_execz .LBB0_70
s_branch .LBB0_71
.LBB0_69:
.LBB0_70:
global_load_b64 v[12:13], v25, s[0:1]
s_add_i32 s14, s15, -8
s_add_u32 s0, s0, 8
s_addc_u32 s1, s1, 0
.LBB0_71:
s_cmp_gt_u32 s14, 7
s_cbranch_scc1 .LBB0_76
v_mov_b32_e32 v14, 0
v_mov_b32_e32 v15, 0
s_cmp_eq_u32 s14, 0
s_cbranch_scc1 .LBB0_75
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], s[0:1]
.LBB0_74:
global_load_u8 v16, v25, s[12:13]
s_add_i32 s14, s14, -1
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v16
s_delay_alu instid0(VALU_DEP_1)
v_lshlrev_b64 v[16:17], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_add_u32 s12, s12, 1
s_addc_u32 s13, s13, 0
s_cmp_lg_u32 s14, 0
v_or_b32_e32 v14, v16, v14
v_or_b32_e32 v15, v17, v15
s_cbranch_scc1 .LBB0_74
.LBB0_75:
s_cbranch_execz .LBB0_77
s_branch .LBB0_78
.LBB0_76:
.LBB0_77:
global_load_b64 v[14:15], v25, s[0:1]
.LBB0_78:
v_mov_b32_e32 v24, v20
v_mov_b32_e32 v26, 0
v_mov_b32_e32 v27, 0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_readfirstlane_b32 s0, v24
v_cmp_eq_u32_e64 s0, s0, v24
s_delay_alu instid0(VALU_DEP_1)
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_84
global_load_b64 v[18:19], v25, s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
s_clause 0x1
global_load_b64 v[16:17], v25, s[2:3] offset:40
global_load_b64 v[26:27], v25, s[2:3]
s_mov_b32 s10, exec_lo
s_waitcnt vmcnt(1)
v_and_b32_e32 v17, v17, v19
v_and_b32_e32 v16, v16, v18
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_lo_u32 v17, v17, 24
v_mul_hi_u32 v21, v16, 24
v_mul_lo_u32 v16, v16, 24
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v17, v21, v17
s_waitcnt vmcnt(0)
v_add_co_u32 v16, vcc_lo, v26, v16
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v17, vcc_lo, v27, v17, vcc_lo
global_load_b64 v[16:17], v[16:17], off glc
s_waitcnt vmcnt(0)
global_atomic_cmpswap_b64 v[26:27], v25, v[16:19], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_cmpx_ne_u64_e64 v[26:27], v[18:19]
s_cbranch_execz .LBB0_83
s_mov_b32 s11, 0
.p2align 6
.LBB0_81:
s_sleep 1
s_clause 0x1
global_load_b64 v[16:17], v25, s[2:3] offset:40
global_load_b64 v[28:29], v25, s[2:3]
v_dual_mov_b32 v18, v26 :: v_dual_mov_b32 v19, v27
s_waitcnt vmcnt(1)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_and_b32_e32 v16, v16, v18
s_waitcnt vmcnt(0)
v_mad_u64_u32 v[26:27], null, v16, 24, v[28:29]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_mov_b32 v16, v27 :: v_dual_and_b32 v17, v17, v19
v_mad_u64_u32 v[27:28], null, v17, 24, v[16:17]
global_load_b64 v[16:17], v[26:27], off glc
s_waitcnt vmcnt(0)
global_atomic_cmpswap_b64 v[26:27], v25, v[16:19], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_cmp_eq_u64_e32 vcc_lo, v[26:27], v[18:19]
s_or_b32 s11, vcc_lo, s11
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s11
s_cbranch_execnz .LBB0_81
s_or_b32 exec_lo, exec_lo, s11
.LBB0_83:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s10
.LBB0_84:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s1
s_clause 0x1
global_load_b64 v[28:29], v25, s[2:3] offset:40
global_load_b128 v[16:19], v25, s[2:3]
v_readfirstlane_b32 s10, v26
v_readfirstlane_b32 s11, v27
s_mov_b32 s14, exec_lo
s_waitcnt vmcnt(1)
v_readfirstlane_b32 s12, v28
v_readfirstlane_b32 s13, v29
s_delay_alu instid0(VALU_DEP_1)
s_and_b64 s[12:13], s[10:11], s[12:13]
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_86
v_dual_mov_b32 v26, s14 :: v_dual_mov_b32 v27, 0
s_mul_i32 s14, s13, 24
s_mul_hi_u32 s15, s12, 24
v_dual_mov_b32 v28, 2 :: v_dual_mov_b32 v29, 1
s_add_i32 s15, s15, s14
s_mul_i32 s14, s12, 24
s_waitcnt vmcnt(0)
v_add_co_u32 v30, vcc_lo, v16, s14
v_add_co_ci_u32_e32 v31, vcc_lo, s15, v17, vcc_lo
global_store_b128 v[30:31], v[26:29], off offset:8
.LBB0_86:
s_or_b32 exec_lo, exec_lo, s1
v_cmp_gt_u64_e64 vcc_lo, s[6:7], 56
v_or_b32_e32 v21, 2, v0
s_lshl_b64 s[14:15], s[12:13], 12
v_lshlrev_b64 v[26:27], 6, v[24:25]
s_lshl_b32 s1, s8, 2
s_delay_alu instid0(SALU_CYCLE_1)
s_add_i32 s1, s1, 28
v_cndmask_b32_e32 v0, v21, v0, vcc_lo
s_waitcnt vmcnt(0)
v_add_co_u32 v18, vcc_lo, v18, s14
v_add_co_ci_u32_e32 v19, vcc_lo, s15, v19, vcc_lo
s_and_b32 s1, s1, 0x1e0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_add_co_u32 v18, vcc_lo, v18, v26
v_and_or_b32 v0, v0, 0xffffff1f, s1
v_add_co_ci_u32_e32 v19, vcc_lo, v19, v27, vcc_lo
s_clause 0x3
global_store_b128 v[18:19], v[0:3], off
global_store_b128 v[18:19], v[4:7], off offset:16
global_store_b128 v[18:19], v[8:11], off offset:32
global_store_b128 v[18:19], v[12:15], off offset:48
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_94
s_clause 0x1
global_load_b64 v[8:9], v25, s[2:3] offset:32 glc
global_load_b64 v[0:1], v25, s[2:3] offset:40
v_dual_mov_b32 v6, s10 :: v_dual_mov_b32 v7, s11
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s14, v0
v_readfirstlane_b32 s15, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b64 s[14:15], s[14:15], s[10:11]
s_mul_i32 s15, s15, 24
s_mul_hi_u32 s16, s14, 24
s_mul_i32 s14, s14, 24
s_add_i32 s16, s16, s15
v_add_co_u32 v4, vcc_lo, v16, s14
v_add_co_ci_u32_e32 v5, vcc_lo, s16, v17, vcc_lo
s_mov_b32 s14, exec_lo
global_store_b64 v[4:5], v[8:9], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[2:3], v25, v[6:9], s[2:3] offset:32 glc
s_waitcnt vmcnt(0)
v_cmpx_ne_u64_e64 v[2:3], v[8:9]
s_cbranch_execz .LBB0_90
s_mov_b32 s15, 0
.LBB0_89:
v_dual_mov_b32 v0, s10 :: v_dual_mov_b32 v1, s11
s_sleep 1
global_store_b64 v[4:5], v[2:3], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[0:1], v25, v[0:3], s[2:3] offset:32 glc
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
s_or_b32 s15, vcc_lo, s15
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s15
s_cbranch_execnz .LBB0_89
.LBB0_90:
s_or_b32 exec_lo, exec_lo, s14
global_load_b64 v[0:1], v25, s[2:3] offset:16
s_mov_b32 s15, exec_lo
s_mov_b32 s14, exec_lo
v_mbcnt_lo_u32_b32 v2, s15, 0
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_eq_u32_e32 0, v2
s_cbranch_execz .LBB0_92
s_bcnt1_i32_b32 s15, s15
s_delay_alu instid0(SALU_CYCLE_1)
v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v2, s15
s_waitcnt vmcnt(0)
global_atomic_add_u64 v[0:1], v[2:3], off offset:8
.LBB0_92:
s_or_b32 exec_lo, exec_lo, s14
s_waitcnt vmcnt(0)
global_load_b64 v[2:3], v[0:1], off offset:16
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, 0, v[2:3]
s_cbranch_vccnz .LBB0_94
global_load_b32 v24, v[0:1], off offset:24
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s14, v24
s_waitcnt_vscnt null, 0x0
global_store_b64 v[2:3], v[24:25], off
s_and_b32 m0, s14, 0xff
s_sendmsg sendmsg(MSG_INTERRUPT)
.LBB0_94:
s_or_b32 exec_lo, exec_lo, s1
s_mul_i32 s1, s13, 24
s_mul_hi_u32 s13, s12, 24
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
s_add_i32 s13, s13, s1
s_mul_i32 s1, s12, 24
v_add_co_u32 v0, vcc_lo, v16, s1
v_add_co_ci_u32_e32 v1, vcc_lo, s13, v17, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, v0, 20
v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
s_branch .LBB0_98
.p2align 6
.LBB0_95:
s_or_b32 exec_lo, exec_lo, s1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_readfirstlane_b32 s1, v2
s_cmp_eq_u32 s1, 0
s_cbranch_scc1 .LBB0_97
s_sleep 1
s_cbranch_execnz .LBB0_98
s_branch .LBB0_100
.p2align 6
.LBB0_97:
s_branch .LBB0_100
.LBB0_98:
v_mov_b32_e32 v2, 1
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_95
global_load_b32 v2, v[0:1], off glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_and_b32_e32 v2, 1, v2
s_branch .LBB0_95
.LBB0_100:
global_load_b64 v[0:1], v[18:19], off
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_28
s_clause 0x2
global_load_b64 v[4:5], v25, s[2:3] offset:40
global_load_b64 v[8:9], v25, s[2:3] offset:24 glc
global_load_b64 v[6:7], v25, s[2:3]
s_waitcnt vmcnt(2)
v_add_co_u32 v10, vcc_lo, v4, 1
v_add_co_ci_u32_e32 v11, vcc_lo, 0, v5, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, v10, s10
v_add_co_ci_u32_e32 v3, vcc_lo, s11, v11, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cmp_eq_u64_e32 vcc_lo, 0, v[2:3]
v_dual_cndmask_b32 v3, v3, v11 :: v_dual_cndmask_b32 v2, v2, v10
v_and_b32_e32 v5, v3, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_and_b32_e32 v4, v2, v4
v_mul_hi_u32 v10, v4, 24
v_mul_lo_u32 v4, v4, 24
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_add_co_u32 v6, vcc_lo, v6, v4
v_mov_b32_e32 v4, v8
v_mul_lo_u32 v5, v5, 24
v_add_nc_u32_e32 v5, v10, v5
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e32 v7, vcc_lo, v7, v5, vcc_lo
v_mov_b32_e32 v5, v9
global_store_b64 v[6:7], v[8:9], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[4:5], v25, v[2:5], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
v_cmp_ne_u64_e32 vcc_lo, v[4:5], v[8:9]
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_28
s_mov_b32 s0, 0
.LBB0_103:
s_sleep 1
global_store_b64 v[6:7], v[4:5], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[8:9], v25, v[2:5], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, v[8:9], v[4:5]
v_dual_mov_b32 v4, v8 :: v_dual_mov_b32 v5, v9
s_or_b32 s0, vcc_lo, s0
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s0
s_cbranch_execnz .LBB0_103
s_branch .LBB0_28
.LBB0_104:
s_mov_b32 s0, 0
.LBB0_105:
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 vcc_lo, exec_lo, s0
s_cbranch_vccz .LBB0_133
v_readfirstlane_b32 s0, v20
v_mov_b32_e32 v4, 0
v_mov_b32_e32 v5, 0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_eq_u32_e64 s0, s0, v20
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_112
s_waitcnt vmcnt(0)
v_mov_b32_e32 v0, 0
s_mov_b32 s4, exec_lo
global_load_b64 v[6:7], v0, s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
s_clause 0x1
global_load_b64 v[1:2], v0, s[2:3] offset:40
global_load_b64 v[3:4], v0, s[2:3]
s_waitcnt vmcnt(1)
v_and_b32_e32 v1, v1, v6
v_and_b32_e32 v2, v2, v7
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_hi_u32 v5, v1, 24
v_mul_lo_u32 v2, v2, 24
v_mul_lo_u32 v1, v1, 24
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v2, v5, v2
s_waitcnt vmcnt(0)
v_add_co_u32 v1, vcc_lo, v3, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, v4, v2, vcc_lo
global_load_b64 v[4:5], v[1:2], off glc
s_waitcnt vmcnt(0)
global_atomic_cmpswap_b64 v[4:5], v0, v[4:7], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_cmpx_ne_u64_e64 v[4:5], v[6:7]
s_cbranch_execz .LBB0_111
s_mov_b32 s5, 0
.p2align 6
.LBB0_109:
s_sleep 1
s_clause 0x1
global_load_b64 v[1:2], v0, s[2:3] offset:40
global_load_b64 v[8:9], v0, s[2:3]
v_dual_mov_b32 v7, v5 :: v_dual_mov_b32 v6, v4
s_waitcnt vmcnt(1)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_and_b32_e32 v1, v1, v6
s_waitcnt vmcnt(0)
v_mad_u64_u32 v[3:4], null, v1, 24, v[8:9]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_mov_b32 v1, v4 :: v_dual_and_b32 v2, v2, v7
v_mad_u64_u32 v[4:5], null, v2, 24, v[1:2]
global_load_b64 v[4:5], v[3:4], off glc
s_waitcnt vmcnt(0)
global_atomic_cmpswap_b64 v[4:5], v0, v[4:7], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[6:7]
s_or_b32 s5, vcc_lo, s5
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s5
s_cbranch_execnz .LBB0_109
s_or_b32 exec_lo, exec_lo, s5
.LBB0_111:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s4
.LBB0_112:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s1
v_mov_b32_e32 v21, 0
v_readfirstlane_b32 s4, v4
v_readfirstlane_b32 s5, v5
s_mov_b32 s8, exec_lo
s_clause 0x1
global_load_b64 v[6:7], v21, s[2:3] offset:40
global_load_b128 v[0:3], v21, s[2:3]
s_waitcnt vmcnt(1)
v_readfirstlane_b32 s6, v6
v_readfirstlane_b32 s7, v7
s_delay_alu instid0(VALU_DEP_1)
s_and_b64 s[6:7], s[4:5], s[6:7]
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_114
v_dual_mov_b32 v4, s8 :: v_dual_mov_b32 v5, 0
s_mul_i32 s8, s7, 24
s_mul_hi_u32 s9, s6, 24
v_dual_mov_b32 v6, 2 :: v_dual_mov_b32 v7, 1
s_add_i32 s9, s9, s8
s_mul_i32 s8, s6, 24
s_waitcnt vmcnt(0)
v_add_co_u32 v8, vcc_lo, v0, s8
v_add_co_ci_u32_e32 v9, vcc_lo, s9, v1, vcc_lo
global_store_b128 v[8:9], v[4:7], off offset:8
.LBB0_114:
s_or_b32 exec_lo, exec_lo, s1
s_lshl_b64 s[8:9], s[6:7], 12
v_and_or_b32 v22, v22, 0xffffff1d, 34
s_waitcnt vmcnt(0)
v_add_co_u32 v4, vcc_lo, v2, s8
v_add_co_ci_u32_e32 v5, vcc_lo, s9, v3, vcc_lo
v_lshlrev_b64 v[2:3], 6, v[20:21]
s_mov_b32 s8, 0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
s_mov_b32 s9, s8
s_mov_b32 s10, s8
s_mov_b32 s11, s8
v_add_co_u32 v8, vcc_lo, v4, v2
v_mov_b32_e32 v6, 0
v_add_co_ci_u32_e32 v9, vcc_lo, v5, v3, vcc_lo
v_dual_mov_b32 v2, s8 :: v_dual_mov_b32 v5, s11
v_dual_mov_b32 v3, s9 :: v_dual_mov_b32 v4, s10
s_delay_alu instid0(VALU_DEP_4)
v_mov_b32_e32 v7, v6
s_clause 0x4
global_store_b64 v[8:9], v[22:23], off
global_store_b128 v[8:9], v[2:5], off offset:8
global_store_b128 v[8:9], v[2:5], off offset:24
global_store_b128 v[8:9], v[2:5], off offset:40
global_store_b64 v[8:9], v[6:7], off offset:56
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_122
v_dual_mov_b32 v8, 0 :: v_dual_mov_b32 v9, s4
v_mov_b32_e32 v10, s5
s_clause 0x1
global_load_b64 v[11:12], v8, s[2:3] offset:32 glc
global_load_b64 v[2:3], v8, s[2:3] offset:40
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s8, v2
v_readfirstlane_b32 s9, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b64 s[8:9], s[8:9], s[4:5]
s_mul_i32 s9, s9, 24
s_mul_hi_u32 s10, s8, 24
s_mul_i32 s8, s8, 24
s_add_i32 s10, s10, s9
v_add_co_u32 v6, vcc_lo, v0, s8
v_add_co_ci_u32_e32 v7, vcc_lo, s10, v1, vcc_lo
s_mov_b32 s8, exec_lo
global_store_b64 v[6:7], v[11:12], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[4:5], v8, v[9:12], s[2:3] offset:32 glc
s_waitcnt vmcnt(0)
v_cmpx_ne_u64_e64 v[4:5], v[11:12]
s_cbranch_execz .LBB0_118
s_mov_b32 s9, 0
.LBB0_117:
v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
s_sleep 1
global_store_b64 v[6:7], v[4:5], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[2:3], v8, v[2:5], s[2:3] offset:32 glc
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
s_or_b32 s9, vcc_lo, s9
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s9
s_cbranch_execnz .LBB0_117
.LBB0_118:
s_or_b32 exec_lo, exec_lo, s8
v_mov_b32_e32 v2, 0
s_mov_b32 s9, exec_lo
s_mov_b32 s8, exec_lo
v_mbcnt_lo_u32_b32 v4, s9, 0
global_load_b64 v[2:3], v2, s[2:3] offset:16
v_cmpx_eq_u32_e32 0, v4
s_cbranch_execz .LBB0_120
s_bcnt1_i32_b32 s9, s9
s_delay_alu instid0(SALU_CYCLE_1)
v_dual_mov_b32 v5, 0 :: v_dual_mov_b32 v4, s9
s_waitcnt vmcnt(0)
global_atomic_add_u64 v[2:3], v[4:5], off offset:8
.LBB0_120:
s_or_b32 exec_lo, exec_lo, s8
s_waitcnt vmcnt(0)
global_load_b64 v[4:5], v[2:3], off offset:16
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, 0, v[4:5]
s_cbranch_vccnz .LBB0_122
global_load_b32 v2, v[2:3], off offset:24
v_mov_b32_e32 v3, 0
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s8, v2
s_waitcnt_vscnt null, 0x0
global_store_b64 v[4:5], v[2:3], off
s_and_b32 m0, s8, 0xff
s_sendmsg sendmsg(MSG_INTERRUPT)
.LBB0_122:
s_or_b32 exec_lo, exec_lo, s1
s_mul_i32 s1, s7, 24
s_mul_hi_u32 s7, s6, 24
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
s_add_i32 s7, s7, s1
s_mul_i32 s1, s6, 24
v_add_co_u32 v0, vcc_lo, v0, s1
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, v0, 20
v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
s_branch .LBB0_126
.p2align 6
.LBB0_123:
s_or_b32 exec_lo, exec_lo, s1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_readfirstlane_b32 s1, v2
s_cmp_eq_u32 s1, 0
s_cbranch_scc1 .LBB0_125
s_sleep 1
s_cbranch_execnz .LBB0_126
s_branch .LBB0_128
.p2align 6
.LBB0_125:
s_branch .LBB0_128
.LBB0_126:
v_mov_b32_e32 v2, 1
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_123
global_load_b32 v2, v[0:1], off glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_and_b32_e32 v2, 1, v2
s_branch .LBB0_123
.LBB0_128:
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_132
v_mov_b32_e32 v6, 0
s_clause 0x2
global_load_b64 v[2:3], v6, s[2:3] offset:40
global_load_b64 v[7:8], v6, s[2:3] offset:24 glc
global_load_b64 v[4:5], v6, s[2:3]
s_waitcnt vmcnt(2)
v_add_co_u32 v9, vcc_lo, v2, 1
v_add_co_ci_u32_e32 v10, vcc_lo, 0, v3, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, v9, s4
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v10, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
v_dual_cndmask_b32 v1, v1, v10 :: v_dual_cndmask_b32 v0, v0, v9
v_and_b32_e32 v3, v1, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_and_b32_e32 v2, v0, v2
v_mul_lo_u32 v3, v3, 24
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mul_hi_u32 v9, v2, 24
v_mul_lo_u32 v2, v2, 24
v_add_nc_u32_e32 v3, v9, v3
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_add_co_u32 v4, vcc_lo, v4, v2
v_mov_b32_e32 v2, v7
v_add_co_ci_u32_e32 v5, vcc_lo, v5, v3, vcc_lo
v_mov_b32_e32 v3, v8
global_store_b64 v[4:5], v[7:8], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[2:3], v6, v[0:3], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[7:8]
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_132
s_mov_b32 s0, 0
.LBB0_131:
s_sleep 1
global_store_b64 v[4:5], v[2:3], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[7:8], v6, v[0:3], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, v[7:8], v[2:3]
v_dual_mov_b32 v2, v7 :: v_dual_mov_b32 v3, v8
s_or_b32 s0, vcc_lo, s0
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s0
s_cbranch_execnz .LBB0_131
.LBB0_132:
s_or_b32 exec_lo, exec_lo, s1
.LBB0_133:
s_waitcnt vmcnt(0) lgkmcnt(0)
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6kernelv
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 256
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 32
.amdhsa_next_free_sgpr 18
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6kernelv, .Lfunc_end0-_Z6kernelv
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type .str,@object
.section .rodata.str1.1,"aMS",@progbits,1
.str:
.asciz "hello cuda\n"
.size .str, 12
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: hidden_block_count_x
- .offset: 4
.size: 4
.value_kind: hidden_block_count_y
- .offset: 8
.size: 4
.value_kind: hidden_block_count_z
- .offset: 12
.size: 2
.value_kind: hidden_group_size_x
- .offset: 14
.size: 2
.value_kind: hidden_group_size_y
- .offset: 16
.size: 2
.value_kind: hidden_group_size_z
- .offset: 18
.size: 2
.value_kind: hidden_remainder_x
- .offset: 20
.size: 2
.value_kind: hidden_remainder_y
- .offset: 22
.size: 2
.value_kind: hidden_remainder_z
- .offset: 40
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 48
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 64
.size: 2
.value_kind: hidden_grid_dims
- .offset: 80
.size: 8
.value_kind: hidden_hostcall_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 256
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6kernelv
.private_segment_fixed_size: 0
.sgpr_count: 20
.sgpr_spill_count: 0
.symbol: _Z6kernelv.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 32
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <stdio.h>
#include <hip/hip_runtime.h>
__global__ void kernel()
{
printf("hello cuda\n");
__syncthreads();
}
int main()
{
kernel<<<1, 1>>>();
hipDeviceSynchronize();
int i = 0, a = 0;
for(i = 0; i < 10000000; i ++)
a ++;
printf("a = %d\n", a);
return 0;
} | .text
.file "hello_cuda.hip"
.globl _Z21__device_stub__kernelv # -- Begin function _Z21__device_stub__kernelv
.p2align 4, 0x90
.type _Z21__device_stub__kernelv,@function
_Z21__device_stub__kernelv: # @_Z21__device_stub__kernelv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z6kernelv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end0:
.size _Z21__device_stub__kernelv, .Lfunc_end0-_Z21__device_stub__kernelv
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
movabsq $4294967297, %rdi # imm = 0x100000001
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z6kernelv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
callq hipDeviceSynchronize
movl $.L.str, %edi
movl $10000000, %esi # imm = 0x989680
xorl %eax, %eax
callq printf
xorl %eax, %eax
addq $56, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6kernelv, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6kernelv,@object # @_Z6kernelv
.section .rodata,"a",@progbits
.globl _Z6kernelv
.p2align 3, 0x0
_Z6kernelv:
.quad _Z21__device_stub__kernelv
.size _Z6kernelv, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "a = %d\n"
.size .L.str, 8
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z6kernelv"
.size .L__unnamed_1, 11
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__kernelv
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6kernelv
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z6kernelv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x000fe20000000f00 */
/*0020*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff047624 */
/* 0x000fe200078e00ff */
/*0030*/ CS2R R6, SRZ ; /* 0x0000000000067805 */
/* 0x000fe2000001ff00 */
/*0040*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0xc] ; /* 0x01000300ff057624 */
/* 0x000fe200078e00ff */
/*0050*/ LDC.64 R2, c[0x4][R0] ; /* 0x0100000000027b82 */
/* 0x00006c0000000a00 */
/*0060*/ LEPC R8 ; /* 0x000000000008734e */
/* 0x000fe40000000000 */
/*0070*/ MOV R11, 0xe0 ; /* 0x000000e0000b7802 */
/* 0x000fe40000000f00 */
/*0080*/ MOV R20, 0x60 ; /* 0x0000006000147802 */
/* 0x000fe40000000f00 */
/*0090*/ MOV R21, 0x0 ; /* 0x0000000000157802 */
/* 0x000fe40000000f00 */
/*00a0*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x001fc40000000f00 */
/*00b0*/ IADD3 R20, P0, P1, -R20, R11, R8 ; /* 0x0000000b14147210 */
/* 0x000fc8000791e108 */
/*00c0*/ IADD3.X R21, ~R0, R21, R9, P0, P1 ; /* 0x0000001500157210 */
/* 0x000fc800007e2509 */
/*00d0*/ CALL.ABS.NOINC R2 ; /* 0x0000000002007343 */
/* 0x002fea0003c00000 */
/*00e0*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */
/* 0x000fe20003800000 */
/*00f0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0100*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0110*/ BRA 0x110; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6kernelv
.globl _Z6kernelv
.p2align 8
.type _Z6kernelv,@function
_Z6kernelv:
s_load_b64 s[2:3], s[0:1], 0x50
v_mbcnt_lo_u32_b32 v20, -1, 0
v_mov_b32_e32 v6, 0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_dual_mov_b32 v7, 0 :: v_dual_mov_b32 v4, v20
v_readfirstlane_b32 s0, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_eq_u32_e64 s0, s0, v4
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_6
v_mov_b32_e32 v0, 0
s_mov_b32 s4, exec_lo
s_waitcnt lgkmcnt(0)
global_load_b64 v[8:9], v0, s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
s_clause 0x1
global_load_b64 v[1:2], v0, s[2:3] offset:40
global_load_b64 v[5:6], v0, s[2:3]
s_waitcnt vmcnt(1)
v_and_b32_e32 v1, v1, v8
v_and_b32_e32 v2, v2, v9
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_hi_u32 v3, v1, 24
v_mul_lo_u32 v2, v2, 24
v_mul_lo_u32 v1, v1, 24
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v2, v3, v2
s_waitcnt vmcnt(0)
v_add_co_u32 v1, vcc_lo, v5, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, v6, v2, vcc_lo
global_load_b64 v[6:7], v[1:2], off glc
s_waitcnt vmcnt(0)
global_atomic_cmpswap_b64 v[6:7], v0, v[6:9], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_cmpx_ne_u64_e64 v[6:7], v[8:9]
s_cbranch_execz .LBB0_5
s_mov_b32 s5, 0
.p2align 6
.LBB0_3:
s_sleep 1
s_clause 0x1
global_load_b64 v[1:2], v0, s[2:3] offset:40
global_load_b64 v[10:11], v0, s[2:3]
v_dual_mov_b32 v9, v7 :: v_dual_mov_b32 v8, v6
s_waitcnt vmcnt(1)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_and_b32_e32 v1, v1, v8
v_and_b32_e32 v7, v2, v9
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[5:6], null, v1, 24, v[10:11]
v_mov_b32_e32 v1, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[2:3], null, v7, 24, v[1:2]
v_mov_b32_e32 v6, v2
global_load_b64 v[6:7], v[5:6], off glc
s_waitcnt vmcnt(0)
global_atomic_cmpswap_b64 v[6:7], v0, v[6:9], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_cmp_eq_u64_e32 vcc_lo, v[6:7], v[8:9]
s_or_b32 s5, vcc_lo, s5
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s5
s_cbranch_execnz .LBB0_3
s_or_b32 exec_lo, exec_lo, s5
.LBB0_5:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s4
.LBB0_6:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s1
v_mov_b32_e32 v5, 0
v_readfirstlane_b32 s4, v6
v_readfirstlane_b32 s5, v7
s_mov_b32 s8, exec_lo
s_waitcnt lgkmcnt(0)
s_clause 0x1
global_load_b64 v[8:9], v5, s[2:3] offset:40
global_load_b128 v[0:3], v5, s[2:3]
s_waitcnt vmcnt(1)
v_readfirstlane_b32 s6, v8
v_readfirstlane_b32 s7, v9
s_delay_alu instid0(VALU_DEP_1)
s_and_b64 s[6:7], s[4:5], s[6:7]
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_8
v_dual_mov_b32 v6, s8 :: v_dual_mov_b32 v7, 0
s_mul_i32 s8, s7, 24
s_mul_hi_u32 s9, s6, 24
v_dual_mov_b32 v8, 2 :: v_dual_mov_b32 v9, 1
s_add_i32 s9, s9, s8
s_mul_i32 s8, s6, 24
s_waitcnt vmcnt(0)
v_add_co_u32 v10, vcc_lo, v0, s8
v_add_co_ci_u32_e32 v11, vcc_lo, s9, v1, vcc_lo
global_store_b128 v[10:11], v[6:9], off offset:8
.LBB0_8:
s_or_b32 exec_lo, exec_lo, s1
s_lshl_b64 s[8:9], s[6:7], 12
v_lshlrev_b64 v[4:5], 6, v[4:5]
s_waitcnt vmcnt(0)
v_add_co_u32 v2, vcc_lo, v2, s8
v_add_co_ci_u32_e32 v7, vcc_lo, s9, v3, vcc_lo
v_mov_b32_e32 v3, 0
s_mov_b32 s8, 0
s_delay_alu instid0(VALU_DEP_3)
v_add_co_u32 v6, vcc_lo, v2, v4
v_mov_b32_e32 v2, 33
s_mov_b32 s9, s8
s_mov_b32 s10, s8
s_mov_b32 s11, s8
v_add_co_ci_u32_e32 v7, vcc_lo, v7, v5, vcc_lo
v_mov_b32_e32 v4, v3
v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v8, s8
v_dual_mov_b32 v9, s9 :: v_dual_mov_b32 v10, s10
v_mov_b32_e32 v11, s11
s_clause 0x3
global_store_b128 v[6:7], v[2:5], off
global_store_b128 v[6:7], v[8:11], off offset:16
global_store_b128 v[6:7], v[8:11], off offset:32
global_store_b128 v[6:7], v[8:11], off offset:48
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_16
v_dual_mov_b32 v10, 0 :: v_dual_mov_b32 v11, s4
v_mov_b32_e32 v12, s5
s_clause 0x1
global_load_b64 v[13:14], v10, s[2:3] offset:32 glc
global_load_b64 v[2:3], v10, s[2:3] offset:40
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s8, v2
v_readfirstlane_b32 s9, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b64 s[8:9], s[8:9], s[4:5]
s_mul_i32 s9, s9, 24
s_mul_hi_u32 s10, s8, 24
s_mul_i32 s8, s8, 24
s_add_i32 s10, s10, s9
v_add_co_u32 v8, vcc_lo, v0, s8
v_add_co_ci_u32_e32 v9, vcc_lo, s10, v1, vcc_lo
s_mov_b32 s8, exec_lo
global_store_b64 v[8:9], v[13:14], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[4:5], v10, v[11:14], s[2:3] offset:32 glc
s_waitcnt vmcnt(0)
v_cmpx_ne_u64_e64 v[4:5], v[13:14]
s_cbranch_execz .LBB0_12
s_mov_b32 s9, 0
.LBB0_11:
v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
s_sleep 1
global_store_b64 v[8:9], v[4:5], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[2:3], v10, v[2:5], s[2:3] offset:32 glc
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
s_or_b32 s9, vcc_lo, s9
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s9
s_cbranch_execnz .LBB0_11
.LBB0_12:
s_or_b32 exec_lo, exec_lo, s8
v_mov_b32_e32 v2, 0
s_mov_b32 s9, exec_lo
s_mov_b32 s8, exec_lo
v_mbcnt_lo_u32_b32 v4, s9, 0
global_load_b64 v[2:3], v2, s[2:3] offset:16
v_cmpx_eq_u32_e32 0, v4
s_cbranch_execz .LBB0_14
s_bcnt1_i32_b32 s9, s9
s_delay_alu instid0(SALU_CYCLE_1)
v_dual_mov_b32 v5, 0 :: v_dual_mov_b32 v4, s9
s_waitcnt vmcnt(0)
global_atomic_add_u64 v[2:3], v[4:5], off offset:8
.LBB0_14:
s_or_b32 exec_lo, exec_lo, s8
s_waitcnt vmcnt(0)
global_load_b64 v[4:5], v[2:3], off offset:16
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, 0, v[4:5]
s_cbranch_vccnz .LBB0_16
global_load_b32 v2, v[2:3], off offset:24
v_mov_b32_e32 v3, 0
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s8, v2
s_waitcnt_vscnt null, 0x0
global_store_b64 v[4:5], v[2:3], off
s_and_b32 m0, s8, 0xff
s_sendmsg sendmsg(MSG_INTERRUPT)
.LBB0_16:
s_or_b32 exec_lo, exec_lo, s1
s_mul_i32 s1, s7, 24
s_mul_hi_u32 s7, s6, 24
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
s_add_i32 s7, s7, s1
s_mul_i32 s1, s6, 24
v_add_co_u32 v0, vcc_lo, v0, s1
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, v0, 20
v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
s_branch .LBB0_20
.p2align 6
.LBB0_17:
s_or_b32 exec_lo, exec_lo, s1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_readfirstlane_b32 s1, v2
s_cmp_eq_u32 s1, 0
s_cbranch_scc1 .LBB0_19
s_sleep 1
s_cbranch_execnz .LBB0_20
s_branch .LBB0_22
.p2align 6
.LBB0_19:
s_branch .LBB0_22
.LBB0_20:
v_mov_b32_e32 v2, 1
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_17
global_load_b32 v2, v[0:1], off glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_and_b32_e32 v2, 1, v2
s_branch .LBB0_17
.LBB0_22:
global_load_b64 v[22:23], v[6:7], off
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_26
v_mov_b32_e32 v6, 0
s_clause 0x2
global_load_b64 v[2:3], v6, s[2:3] offset:40
global_load_b64 v[7:8], v6, s[2:3] offset:24 glc
global_load_b64 v[4:5], v6, s[2:3]
s_waitcnt vmcnt(2)
v_add_co_u32 v9, vcc_lo, v2, 1
v_add_co_ci_u32_e32 v10, vcc_lo, 0, v3, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, v9, s4
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v10, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
v_dual_cndmask_b32 v1, v1, v10 :: v_dual_cndmask_b32 v0, v0, v9
v_and_b32_e32 v3, v1, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_and_b32_e32 v2, v0, v2
v_mul_lo_u32 v3, v3, 24
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mul_hi_u32 v9, v2, 24
v_mul_lo_u32 v2, v2, 24
v_add_nc_u32_e32 v3, v9, v3
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_add_co_u32 v4, vcc_lo, v4, v2
v_mov_b32_e32 v2, v7
v_add_co_ci_u32_e32 v5, vcc_lo, v5, v3, vcc_lo
v_mov_b32_e32 v3, v8
global_store_b64 v[4:5], v[7:8], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[2:3], v6, v[0:3], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[7:8]
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_26
s_mov_b32 s0, 0
.LBB0_25:
s_sleep 1
global_store_b64 v[4:5], v[2:3], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[7:8], v6, v[0:3], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, v[7:8], v[2:3]
v_dual_mov_b32 v2, v7 :: v_dual_mov_b32 v3, v8
s_or_b32 s0, vcc_lo, s0
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s0
s_cbranch_execnz .LBB0_25
.LBB0_26:
s_or_b32 exec_lo, exec_lo, s1
s_getpc_b64 s[4:5]
s_add_u32 s4, s4, .str@rel32@lo+4
s_addc_u32 s5, s5, .str@rel32@hi+12
s_mov_b32 s0, -1
s_cmp_lg_u64 s[4:5], 0
s_cbranch_scc0 .LBB0_105
s_waitcnt vmcnt(0)
v_dual_mov_b32 v1, v23 :: v_dual_and_b32 v0, -3, v22
v_mov_b32_e32 v25, 0
s_mov_b64 s[6:7], 12
s_branch .LBB0_29
.LBB0_28:
s_or_b32 exec_lo, exec_lo, s1
s_sub_u32 s6, s6, s8
s_subb_u32 s7, s7, s9
s_add_u32 s4, s4, s8
s_addc_u32 s5, s5, s9
s_cmp_lg_u64 s[6:7], 0
s_cbranch_scc0 .LBB0_104
.LBB0_29:
v_cmp_lt_u64_e64 s0, s[6:7], 56
s_delay_alu instid0(VALU_DEP_1)
s_and_b32 s0, s0, exec_lo
s_cselect_b32 s8, s6, 56
s_cselect_b32 s9, s7, 0
s_cmp_gt_u32 s8, 7
s_mov_b32 s0, -1
s_cbranch_scc1 .LBB0_34
v_mov_b32_e32 v2, 0
v_mov_b32_e32 v3, 0
s_cmp_eq_u32 s8, 0
s_cbranch_scc1 .LBB0_33
s_lshl_b64 s[0:1], s[8:9], 3
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], s[4:5]
.LBB0_32:
global_load_u8 v4, v25, s[12:13]
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v4
s_delay_alu instid0(VALU_DEP_1)
v_lshlrev_b64 v[4:5], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_add_u32 s12, s12, 1
s_addc_u32 s13, s13, 0
s_cmp_lg_u32 s0, s10
v_or_b32_e32 v2, v4, v2
v_or_b32_e32 v3, v5, v3
s_cbranch_scc1 .LBB0_32
.LBB0_33:
s_mov_b32 s0, 0
s_mov_b32 s15, 0
.LBB0_34:
s_and_not1_b32 vcc_lo, exec_lo, s0
s_mov_b64 s[0:1], s[4:5]
s_cbranch_vccnz .LBB0_36
global_load_b64 v[2:3], v25, s[4:5]
s_add_i32 s15, s8, -8
s_add_u32 s0, s4, 8
s_addc_u32 s1, s5, 0
.LBB0_36:
s_cmp_gt_u32 s15, 7
s_cbranch_scc1 .LBB0_41
v_mov_b32_e32 v4, 0
v_mov_b32_e32 v5, 0
s_cmp_eq_u32 s15, 0
s_cbranch_scc1 .LBB0_40
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], 0
.LBB0_39:
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s16, s0, s12
s_addc_u32 s17, s1, s13
s_add_u32 s12, s12, 1
global_load_u8 v6, v25, s[16:17]
s_addc_u32 s13, s13, 0
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[6:7], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_cmp_lg_u32 s15, s12
v_or_b32_e32 v4, v6, v4
s_delay_alu instid0(VALU_DEP_2)
v_or_b32_e32 v5, v7, v5
s_cbranch_scc1 .LBB0_39
.LBB0_40:
s_mov_b32 s14, 0
s_cbranch_execz .LBB0_42
s_branch .LBB0_43
.LBB0_41:
.LBB0_42:
global_load_b64 v[4:5], v25, s[0:1]
s_add_i32 s14, s15, -8
s_add_u32 s0, s0, 8
s_addc_u32 s1, s1, 0
.LBB0_43:
s_cmp_gt_u32 s14, 7
s_cbranch_scc1 .LBB0_48
v_mov_b32_e32 v6, 0
v_mov_b32_e32 v7, 0
s_cmp_eq_u32 s14, 0
s_cbranch_scc1 .LBB0_47
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], 0
.LBB0_46:
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s16, s0, s12
s_addc_u32 s17, s1, s13
s_add_u32 s12, s12, 1
global_load_u8 v8, v25, s[16:17]
s_addc_u32 s13, s13, 0
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v8
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[8:9], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_cmp_lg_u32 s14, s12
v_or_b32_e32 v6, v8, v6
s_delay_alu instid0(VALU_DEP_2)
v_or_b32_e32 v7, v9, v7
s_cbranch_scc1 .LBB0_46
.LBB0_47:
s_mov_b32 s15, 0
s_cbranch_execz .LBB0_49
s_branch .LBB0_50
.LBB0_48:
.LBB0_49:
global_load_b64 v[6:7], v25, s[0:1]
s_add_i32 s15, s14, -8
s_add_u32 s0, s0, 8
s_addc_u32 s1, s1, 0
.LBB0_50:
s_cmp_gt_u32 s15, 7
s_cbranch_scc1 .LBB0_55
v_mov_b32_e32 v8, 0
v_mov_b32_e32 v9, 0
s_cmp_eq_u32 s15, 0
s_cbranch_scc1 .LBB0_54
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], 0
.LBB0_53:
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s16, s0, s12
s_addc_u32 s17, s1, s13
s_add_u32 s12, s12, 1
global_load_u8 v10, v25, s[16:17]
s_addc_u32 s13, s13, 0
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[10:11], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_cmp_lg_u32 s15, s12
v_or_b32_e32 v8, v10, v8
s_delay_alu instid0(VALU_DEP_2)
v_or_b32_e32 v9, v11, v9
s_cbranch_scc1 .LBB0_53
.LBB0_54:
s_mov_b32 s14, 0
s_cbranch_execz .LBB0_56
s_branch .LBB0_57
.LBB0_55:
.LBB0_56:
global_load_b64 v[8:9], v25, s[0:1]
s_add_i32 s14, s15, -8
s_add_u32 s0, s0, 8
s_addc_u32 s1, s1, 0
.LBB0_57:
s_cmp_gt_u32 s14, 7
s_cbranch_scc1 .LBB0_62
v_mov_b32_e32 v10, 0
v_mov_b32_e32 v11, 0
s_cmp_eq_u32 s14, 0
s_cbranch_scc1 .LBB0_61
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], 0
.LBB0_60:
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s16, s0, s12
s_addc_u32 s17, s1, s13
s_add_u32 s12, s12, 1
global_load_u8 v12, v25, s[16:17]
s_addc_u32 s13, s13, 0
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v12
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[12:13], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_cmp_lg_u32 s14, s12
v_or_b32_e32 v10, v12, v10
s_delay_alu instid0(VALU_DEP_2)
v_or_b32_e32 v11, v13, v11
s_cbranch_scc1 .LBB0_60
.LBB0_61:
s_mov_b32 s15, 0
s_cbranch_execz .LBB0_63
s_branch .LBB0_64
.LBB0_62:
.LBB0_63:
global_load_b64 v[10:11], v25, s[0:1]
s_add_i32 s15, s14, -8
s_add_u32 s0, s0, 8
s_addc_u32 s1, s1, 0
.LBB0_64:
s_cmp_gt_u32 s15, 7
s_cbranch_scc1 .LBB0_69
v_mov_b32_e32 v12, 0
v_mov_b32_e32 v13, 0
s_cmp_eq_u32 s15, 0
s_cbranch_scc1 .LBB0_68
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], 0
.LBB0_67:
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s16, s0, s12
s_addc_u32 s17, s1, s13
s_add_u32 s12, s12, 1
global_load_u8 v14, v25, s[16:17]
s_addc_u32 s13, s13, 0
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v14
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[14:15], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_cmp_lg_u32 s15, s12
v_or_b32_e32 v12, v14, v12
s_delay_alu instid0(VALU_DEP_2)
v_or_b32_e32 v13, v15, v13
s_cbranch_scc1 .LBB0_67
.LBB0_68:
s_mov_b32 s14, 0
s_cbranch_execz .LBB0_70
s_branch .LBB0_71
.LBB0_69:
.LBB0_70:
global_load_b64 v[12:13], v25, s[0:1]
s_add_i32 s14, s15, -8
s_add_u32 s0, s0, 8
s_addc_u32 s1, s1, 0
.LBB0_71:
s_cmp_gt_u32 s14, 7
s_cbranch_scc1 .LBB0_76
v_mov_b32_e32 v14, 0
v_mov_b32_e32 v15, 0
s_cmp_eq_u32 s14, 0
s_cbranch_scc1 .LBB0_75
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], s[0:1]
.LBB0_74:
global_load_u8 v16, v25, s[12:13]
s_add_i32 s14, s14, -1
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v16
s_delay_alu instid0(VALU_DEP_1)
v_lshlrev_b64 v[16:17], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_add_u32 s12, s12, 1
s_addc_u32 s13, s13, 0
s_cmp_lg_u32 s14, 0
v_or_b32_e32 v14, v16, v14
v_or_b32_e32 v15, v17, v15
s_cbranch_scc1 .LBB0_74
.LBB0_75:
s_cbranch_execz .LBB0_77
s_branch .LBB0_78
.LBB0_76:
.LBB0_77:
global_load_b64 v[14:15], v25, s[0:1]
.LBB0_78:
v_mov_b32_e32 v24, v20
v_mov_b32_e32 v26, 0
v_mov_b32_e32 v27, 0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_readfirstlane_b32 s0, v24
v_cmp_eq_u32_e64 s0, s0, v24
s_delay_alu instid0(VALU_DEP_1)
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_84
global_load_b64 v[18:19], v25, s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
s_clause 0x1
global_load_b64 v[16:17], v25, s[2:3] offset:40
global_load_b64 v[26:27], v25, s[2:3]
s_mov_b32 s10, exec_lo
s_waitcnt vmcnt(1)
v_and_b32_e32 v17, v17, v19
v_and_b32_e32 v16, v16, v18
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_lo_u32 v17, v17, 24
v_mul_hi_u32 v21, v16, 24
v_mul_lo_u32 v16, v16, 24
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v17, v21, v17
s_waitcnt vmcnt(0)
v_add_co_u32 v16, vcc_lo, v26, v16
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v17, vcc_lo, v27, v17, vcc_lo
global_load_b64 v[16:17], v[16:17], off glc
s_waitcnt vmcnt(0)
global_atomic_cmpswap_b64 v[26:27], v25, v[16:19], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_cmpx_ne_u64_e64 v[26:27], v[18:19]
s_cbranch_execz .LBB0_83
s_mov_b32 s11, 0
.p2align 6
.LBB0_81:
s_sleep 1
s_clause 0x1
global_load_b64 v[16:17], v25, s[2:3] offset:40
global_load_b64 v[28:29], v25, s[2:3]
v_dual_mov_b32 v18, v26 :: v_dual_mov_b32 v19, v27
s_waitcnt vmcnt(1)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_and_b32_e32 v16, v16, v18
s_waitcnt vmcnt(0)
v_mad_u64_u32 v[26:27], null, v16, 24, v[28:29]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_mov_b32 v16, v27 :: v_dual_and_b32 v17, v17, v19
v_mad_u64_u32 v[27:28], null, v17, 24, v[16:17]
global_load_b64 v[16:17], v[26:27], off glc
s_waitcnt vmcnt(0)
global_atomic_cmpswap_b64 v[26:27], v25, v[16:19], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_cmp_eq_u64_e32 vcc_lo, v[26:27], v[18:19]
s_or_b32 s11, vcc_lo, s11
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s11
s_cbranch_execnz .LBB0_81
s_or_b32 exec_lo, exec_lo, s11
.LBB0_83:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s10
.LBB0_84:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s1
s_clause 0x1
global_load_b64 v[28:29], v25, s[2:3] offset:40
global_load_b128 v[16:19], v25, s[2:3]
v_readfirstlane_b32 s10, v26
v_readfirstlane_b32 s11, v27
s_mov_b32 s14, exec_lo
s_waitcnt vmcnt(1)
v_readfirstlane_b32 s12, v28
v_readfirstlane_b32 s13, v29
s_delay_alu instid0(VALU_DEP_1)
s_and_b64 s[12:13], s[10:11], s[12:13]
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_86
v_dual_mov_b32 v26, s14 :: v_dual_mov_b32 v27, 0
s_mul_i32 s14, s13, 24
s_mul_hi_u32 s15, s12, 24
v_dual_mov_b32 v28, 2 :: v_dual_mov_b32 v29, 1
s_add_i32 s15, s15, s14
s_mul_i32 s14, s12, 24
s_waitcnt vmcnt(0)
v_add_co_u32 v30, vcc_lo, v16, s14
v_add_co_ci_u32_e32 v31, vcc_lo, s15, v17, vcc_lo
global_store_b128 v[30:31], v[26:29], off offset:8
.LBB0_86:
s_or_b32 exec_lo, exec_lo, s1
v_cmp_gt_u64_e64 vcc_lo, s[6:7], 56
v_or_b32_e32 v21, 2, v0
s_lshl_b64 s[14:15], s[12:13], 12
v_lshlrev_b64 v[26:27], 6, v[24:25]
s_lshl_b32 s1, s8, 2
s_delay_alu instid0(SALU_CYCLE_1)
s_add_i32 s1, s1, 28
v_cndmask_b32_e32 v0, v21, v0, vcc_lo
s_waitcnt vmcnt(0)
v_add_co_u32 v18, vcc_lo, v18, s14
v_add_co_ci_u32_e32 v19, vcc_lo, s15, v19, vcc_lo
s_and_b32 s1, s1, 0x1e0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_add_co_u32 v18, vcc_lo, v18, v26
v_and_or_b32 v0, v0, 0xffffff1f, s1
v_add_co_ci_u32_e32 v19, vcc_lo, v19, v27, vcc_lo
s_clause 0x3
global_store_b128 v[18:19], v[0:3], off
global_store_b128 v[18:19], v[4:7], off offset:16
global_store_b128 v[18:19], v[8:11], off offset:32
global_store_b128 v[18:19], v[12:15], off offset:48
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_94
s_clause 0x1
global_load_b64 v[8:9], v25, s[2:3] offset:32 glc
global_load_b64 v[0:1], v25, s[2:3] offset:40
v_dual_mov_b32 v6, s10 :: v_dual_mov_b32 v7, s11
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s14, v0
v_readfirstlane_b32 s15, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b64 s[14:15], s[14:15], s[10:11]
s_mul_i32 s15, s15, 24
s_mul_hi_u32 s16, s14, 24
s_mul_i32 s14, s14, 24
s_add_i32 s16, s16, s15
v_add_co_u32 v4, vcc_lo, v16, s14
v_add_co_ci_u32_e32 v5, vcc_lo, s16, v17, vcc_lo
s_mov_b32 s14, exec_lo
global_store_b64 v[4:5], v[8:9], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[2:3], v25, v[6:9], s[2:3] offset:32 glc
s_waitcnt vmcnt(0)
v_cmpx_ne_u64_e64 v[2:3], v[8:9]
s_cbranch_execz .LBB0_90
s_mov_b32 s15, 0
.LBB0_89:
v_dual_mov_b32 v0, s10 :: v_dual_mov_b32 v1, s11
s_sleep 1
global_store_b64 v[4:5], v[2:3], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[0:1], v25, v[0:3], s[2:3] offset:32 glc
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
s_or_b32 s15, vcc_lo, s15
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s15
s_cbranch_execnz .LBB0_89
.LBB0_90:
s_or_b32 exec_lo, exec_lo, s14
global_load_b64 v[0:1], v25, s[2:3] offset:16
s_mov_b32 s15, exec_lo
s_mov_b32 s14, exec_lo
v_mbcnt_lo_u32_b32 v2, s15, 0
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_eq_u32_e32 0, v2
s_cbranch_execz .LBB0_92
s_bcnt1_i32_b32 s15, s15
s_delay_alu instid0(SALU_CYCLE_1)
v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v2, s15
s_waitcnt vmcnt(0)
global_atomic_add_u64 v[0:1], v[2:3], off offset:8
.LBB0_92:
s_or_b32 exec_lo, exec_lo, s14
s_waitcnt vmcnt(0)
global_load_b64 v[2:3], v[0:1], off offset:16
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, 0, v[2:3]
s_cbranch_vccnz .LBB0_94
global_load_b32 v24, v[0:1], off offset:24
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s14, v24
s_waitcnt_vscnt null, 0x0
global_store_b64 v[2:3], v[24:25], off
s_and_b32 m0, s14, 0xff
s_sendmsg sendmsg(MSG_INTERRUPT)
.LBB0_94:
s_or_b32 exec_lo, exec_lo, s1
s_mul_i32 s1, s13, 24
s_mul_hi_u32 s13, s12, 24
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
s_add_i32 s13, s13, s1
s_mul_i32 s1, s12, 24
v_add_co_u32 v0, vcc_lo, v16, s1
v_add_co_ci_u32_e32 v1, vcc_lo, s13, v17, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, v0, 20
v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
s_branch .LBB0_98
.p2align 6
.LBB0_95:
s_or_b32 exec_lo, exec_lo, s1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_readfirstlane_b32 s1, v2
s_cmp_eq_u32 s1, 0
s_cbranch_scc1 .LBB0_97
s_sleep 1
s_cbranch_execnz .LBB0_98
s_branch .LBB0_100
.p2align 6
.LBB0_97:
s_branch .LBB0_100
.LBB0_98:
v_mov_b32_e32 v2, 1
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_95
global_load_b32 v2, v[0:1], off glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_and_b32_e32 v2, 1, v2
s_branch .LBB0_95
.LBB0_100:
global_load_b64 v[0:1], v[18:19], off
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_28
s_clause 0x2
global_load_b64 v[4:5], v25, s[2:3] offset:40
global_load_b64 v[8:9], v25, s[2:3] offset:24 glc
global_load_b64 v[6:7], v25, s[2:3]
s_waitcnt vmcnt(2)
v_add_co_u32 v10, vcc_lo, v4, 1
v_add_co_ci_u32_e32 v11, vcc_lo, 0, v5, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, v10, s10
v_add_co_ci_u32_e32 v3, vcc_lo, s11, v11, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cmp_eq_u64_e32 vcc_lo, 0, v[2:3]
v_dual_cndmask_b32 v3, v3, v11 :: v_dual_cndmask_b32 v2, v2, v10
v_and_b32_e32 v5, v3, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_and_b32_e32 v4, v2, v4
v_mul_hi_u32 v10, v4, 24
v_mul_lo_u32 v4, v4, 24
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_add_co_u32 v6, vcc_lo, v6, v4
v_mov_b32_e32 v4, v8
v_mul_lo_u32 v5, v5, 24
v_add_nc_u32_e32 v5, v10, v5
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e32 v7, vcc_lo, v7, v5, vcc_lo
v_mov_b32_e32 v5, v9
global_store_b64 v[6:7], v[8:9], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[4:5], v25, v[2:5], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
v_cmp_ne_u64_e32 vcc_lo, v[4:5], v[8:9]
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_28
s_mov_b32 s0, 0
.LBB0_103:
s_sleep 1
global_store_b64 v[6:7], v[4:5], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[8:9], v25, v[2:5], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, v[8:9], v[4:5]
v_dual_mov_b32 v4, v8 :: v_dual_mov_b32 v5, v9
s_or_b32 s0, vcc_lo, s0
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s0
s_cbranch_execnz .LBB0_103
s_branch .LBB0_28
.LBB0_104:
s_mov_b32 s0, 0
.LBB0_105:
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 vcc_lo, exec_lo, s0
s_cbranch_vccz .LBB0_133
v_readfirstlane_b32 s0, v20
v_mov_b32_e32 v4, 0
v_mov_b32_e32 v5, 0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_eq_u32_e64 s0, s0, v20
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_112
s_waitcnt vmcnt(0)
v_mov_b32_e32 v0, 0
s_mov_b32 s4, exec_lo
global_load_b64 v[6:7], v0, s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
s_clause 0x1
global_load_b64 v[1:2], v0, s[2:3] offset:40
global_load_b64 v[3:4], v0, s[2:3]
s_waitcnt vmcnt(1)
v_and_b32_e32 v1, v1, v6
v_and_b32_e32 v2, v2, v7
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_hi_u32 v5, v1, 24
v_mul_lo_u32 v2, v2, 24
v_mul_lo_u32 v1, v1, 24
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v2, v5, v2
s_waitcnt vmcnt(0)
v_add_co_u32 v1, vcc_lo, v3, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, v4, v2, vcc_lo
global_load_b64 v[4:5], v[1:2], off glc
s_waitcnt vmcnt(0)
global_atomic_cmpswap_b64 v[4:5], v0, v[4:7], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_cmpx_ne_u64_e64 v[4:5], v[6:7]
s_cbranch_execz .LBB0_111
s_mov_b32 s5, 0
.p2align 6
.LBB0_109:
s_sleep 1
s_clause 0x1
global_load_b64 v[1:2], v0, s[2:3] offset:40
global_load_b64 v[8:9], v0, s[2:3]
v_dual_mov_b32 v7, v5 :: v_dual_mov_b32 v6, v4
s_waitcnt vmcnt(1)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_and_b32_e32 v1, v1, v6
s_waitcnt vmcnt(0)
v_mad_u64_u32 v[3:4], null, v1, 24, v[8:9]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_mov_b32 v1, v4 :: v_dual_and_b32 v2, v2, v7
v_mad_u64_u32 v[4:5], null, v2, 24, v[1:2]
global_load_b64 v[4:5], v[3:4], off glc
s_waitcnt vmcnt(0)
global_atomic_cmpswap_b64 v[4:5], v0, v[4:7], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[6:7]
s_or_b32 s5, vcc_lo, s5
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s5
s_cbranch_execnz .LBB0_109
s_or_b32 exec_lo, exec_lo, s5
.LBB0_111:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s4
.LBB0_112:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s1
v_mov_b32_e32 v21, 0
v_readfirstlane_b32 s4, v4
v_readfirstlane_b32 s5, v5
s_mov_b32 s8, exec_lo
s_clause 0x1
global_load_b64 v[6:7], v21, s[2:3] offset:40
global_load_b128 v[0:3], v21, s[2:3]
s_waitcnt vmcnt(1)
v_readfirstlane_b32 s6, v6
v_readfirstlane_b32 s7, v7
s_delay_alu instid0(VALU_DEP_1)
s_and_b64 s[6:7], s[4:5], s[6:7]
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_114
v_dual_mov_b32 v4, s8 :: v_dual_mov_b32 v5, 0
s_mul_i32 s8, s7, 24
s_mul_hi_u32 s9, s6, 24
v_dual_mov_b32 v6, 2 :: v_dual_mov_b32 v7, 1
s_add_i32 s9, s9, s8
s_mul_i32 s8, s6, 24
s_waitcnt vmcnt(0)
v_add_co_u32 v8, vcc_lo, v0, s8
v_add_co_ci_u32_e32 v9, vcc_lo, s9, v1, vcc_lo
global_store_b128 v[8:9], v[4:7], off offset:8
.LBB0_114:
s_or_b32 exec_lo, exec_lo, s1
s_lshl_b64 s[8:9], s[6:7], 12
v_and_or_b32 v22, v22, 0xffffff1d, 34
s_waitcnt vmcnt(0)
v_add_co_u32 v4, vcc_lo, v2, s8
v_add_co_ci_u32_e32 v5, vcc_lo, s9, v3, vcc_lo
v_lshlrev_b64 v[2:3], 6, v[20:21]
s_mov_b32 s8, 0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
s_mov_b32 s9, s8
s_mov_b32 s10, s8
s_mov_b32 s11, s8
v_add_co_u32 v8, vcc_lo, v4, v2
v_mov_b32_e32 v6, 0
v_add_co_ci_u32_e32 v9, vcc_lo, v5, v3, vcc_lo
v_dual_mov_b32 v2, s8 :: v_dual_mov_b32 v5, s11
v_dual_mov_b32 v3, s9 :: v_dual_mov_b32 v4, s10
s_delay_alu instid0(VALU_DEP_4)
v_mov_b32_e32 v7, v6
s_clause 0x4
global_store_b64 v[8:9], v[22:23], off
global_store_b128 v[8:9], v[2:5], off offset:8
global_store_b128 v[8:9], v[2:5], off offset:24
global_store_b128 v[8:9], v[2:5], off offset:40
global_store_b64 v[8:9], v[6:7], off offset:56
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_122
v_dual_mov_b32 v8, 0 :: v_dual_mov_b32 v9, s4
v_mov_b32_e32 v10, s5
s_clause 0x1
global_load_b64 v[11:12], v8, s[2:3] offset:32 glc
global_load_b64 v[2:3], v8, s[2:3] offset:40
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s8, v2
v_readfirstlane_b32 s9, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b64 s[8:9], s[8:9], s[4:5]
s_mul_i32 s9, s9, 24
s_mul_hi_u32 s10, s8, 24
s_mul_i32 s8, s8, 24
s_add_i32 s10, s10, s9
v_add_co_u32 v6, vcc_lo, v0, s8
v_add_co_ci_u32_e32 v7, vcc_lo, s10, v1, vcc_lo
s_mov_b32 s8, exec_lo
global_store_b64 v[6:7], v[11:12], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[4:5], v8, v[9:12], s[2:3] offset:32 glc
s_waitcnt vmcnt(0)
v_cmpx_ne_u64_e64 v[4:5], v[11:12]
s_cbranch_execz .LBB0_118
s_mov_b32 s9, 0
.LBB0_117:
v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
s_sleep 1
global_store_b64 v[6:7], v[4:5], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[2:3], v8, v[2:5], s[2:3] offset:32 glc
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
s_or_b32 s9, vcc_lo, s9
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s9
s_cbranch_execnz .LBB0_117
.LBB0_118:
s_or_b32 exec_lo, exec_lo, s8
v_mov_b32_e32 v2, 0
s_mov_b32 s9, exec_lo
s_mov_b32 s8, exec_lo
v_mbcnt_lo_u32_b32 v4, s9, 0
global_load_b64 v[2:3], v2, s[2:3] offset:16
v_cmpx_eq_u32_e32 0, v4
s_cbranch_execz .LBB0_120
s_bcnt1_i32_b32 s9, s9
s_delay_alu instid0(SALU_CYCLE_1)
v_dual_mov_b32 v5, 0 :: v_dual_mov_b32 v4, s9
s_waitcnt vmcnt(0)
global_atomic_add_u64 v[2:3], v[4:5], off offset:8
.LBB0_120:
s_or_b32 exec_lo, exec_lo, s8
s_waitcnt vmcnt(0)
global_load_b64 v[4:5], v[2:3], off offset:16
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, 0, v[4:5]
s_cbranch_vccnz .LBB0_122
global_load_b32 v2, v[2:3], off offset:24
v_mov_b32_e32 v3, 0
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s8, v2
s_waitcnt_vscnt null, 0x0
global_store_b64 v[4:5], v[2:3], off
s_and_b32 m0, s8, 0xff
s_sendmsg sendmsg(MSG_INTERRUPT)
.LBB0_122:
s_or_b32 exec_lo, exec_lo, s1
s_mul_i32 s1, s7, 24
s_mul_hi_u32 s7, s6, 24
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
s_add_i32 s7, s7, s1
s_mul_i32 s1, s6, 24
v_add_co_u32 v0, vcc_lo, v0, s1
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, v0, 20
v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
s_branch .LBB0_126
.p2align 6
.LBB0_123:
s_or_b32 exec_lo, exec_lo, s1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_readfirstlane_b32 s1, v2
s_cmp_eq_u32 s1, 0
s_cbranch_scc1 .LBB0_125
s_sleep 1
s_cbranch_execnz .LBB0_126
s_branch .LBB0_128
.p2align 6
.LBB0_125:
s_branch .LBB0_128
.LBB0_126:
v_mov_b32_e32 v2, 1
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_123
global_load_b32 v2, v[0:1], off glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_and_b32_e32 v2, 1, v2
s_branch .LBB0_123
.LBB0_128:
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_132
v_mov_b32_e32 v6, 0
s_clause 0x2
global_load_b64 v[2:3], v6, s[2:3] offset:40
global_load_b64 v[7:8], v6, s[2:3] offset:24 glc
global_load_b64 v[4:5], v6, s[2:3]
s_waitcnt vmcnt(2)
v_add_co_u32 v9, vcc_lo, v2, 1
v_add_co_ci_u32_e32 v10, vcc_lo, 0, v3, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, v9, s4
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v10, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
v_dual_cndmask_b32 v1, v1, v10 :: v_dual_cndmask_b32 v0, v0, v9
v_and_b32_e32 v3, v1, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_and_b32_e32 v2, v0, v2
v_mul_lo_u32 v3, v3, 24
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mul_hi_u32 v9, v2, 24
v_mul_lo_u32 v2, v2, 24
v_add_nc_u32_e32 v3, v9, v3
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_add_co_u32 v4, vcc_lo, v4, v2
v_mov_b32_e32 v2, v7
v_add_co_ci_u32_e32 v5, vcc_lo, v5, v3, vcc_lo
v_mov_b32_e32 v3, v8
global_store_b64 v[4:5], v[7:8], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[2:3], v6, v[0:3], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[7:8]
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_132
s_mov_b32 s0, 0
.LBB0_131:
s_sleep 1
global_store_b64 v[4:5], v[2:3], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[7:8], v6, v[0:3], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, v[7:8], v[2:3]
v_dual_mov_b32 v2, v7 :: v_dual_mov_b32 v3, v8
s_or_b32 s0, vcc_lo, s0
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s0
s_cbranch_execnz .LBB0_131
.LBB0_132:
s_or_b32 exec_lo, exec_lo, s1
.LBB0_133:
s_waitcnt vmcnt(0) lgkmcnt(0)
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6kernelv
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 256
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 32
.amdhsa_next_free_sgpr 18
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6kernelv, .Lfunc_end0-_Z6kernelv
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type .str,@object
.section .rodata.str1.1,"aMS",@progbits,1
.str:
.asciz "hello cuda\n"
.size .str, 12
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: hidden_block_count_x
- .offset: 4
.size: 4
.value_kind: hidden_block_count_y
- .offset: 8
.size: 4
.value_kind: hidden_block_count_z
- .offset: 12
.size: 2
.value_kind: hidden_group_size_x
- .offset: 14
.size: 2
.value_kind: hidden_group_size_y
- .offset: 16
.size: 2
.value_kind: hidden_group_size_z
- .offset: 18
.size: 2
.value_kind: hidden_remainder_x
- .offset: 20
.size: 2
.value_kind: hidden_remainder_y
- .offset: 22
.size: 2
.value_kind: hidden_remainder_z
- .offset: 40
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 48
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 64
.size: 2
.value_kind: hidden_grid_dims
- .offset: 80
.size: 8
.value_kind: hidden_hostcall_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 256
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6kernelv
.private_segment_fixed_size: 0
.sgpr_count: 20
.sgpr_spill_count: 0
.symbol: _Z6kernelv.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 32
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000bd22a_00000000-6_hello_cuda.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z24__device_stub__Z6kernelvv
.type _Z24__device_stub__Z6kernelvv, @function
_Z24__device_stub__Z6kernelvv:
.LFB2082:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z6kernelv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z24__device_stub__Z6kernelvv, .-_Z24__device_stub__Z6kernelvv
.globl _Z6kernelv
.type _Z6kernelv, @function
_Z6kernelv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z24__device_stub__Z6kernelvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z6kernelv, .-_Z6kernelv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "a = %d\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L16
.L12:
call cudaDeviceSynchronize@PLT
movl $10000000, %eax
.L13:
subl $1, %eax
jne .L13
movl $10000000, %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %eax
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L16:
.cfi_restore_state
call _Z24__device_stub__Z6kernelvv
jmp .L12
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC1:
.string "_Z6kernelv"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z6kernelv(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "hello_cuda.hip"
.globl _Z21__device_stub__kernelv # -- Begin function _Z21__device_stub__kernelv
.p2align 4, 0x90
.type _Z21__device_stub__kernelv,@function
_Z21__device_stub__kernelv: # @_Z21__device_stub__kernelv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z6kernelv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end0:
.size _Z21__device_stub__kernelv, .Lfunc_end0-_Z21__device_stub__kernelv
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
movabsq $4294967297, %rdi # imm = 0x100000001
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z6kernelv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
callq hipDeviceSynchronize
movl $.L.str, %edi
movl $10000000, %esi # imm = 0x989680
xorl %eax, %eax
callq printf
xorl %eax, %eax
addq $56, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6kernelv, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6kernelv,@object # @_Z6kernelv
.section .rodata,"a",@progbits
.globl _Z6kernelv
.p2align 3, 0x0
_Z6kernelv:
.quad _Z21__device_stub__kernelv
.size _Z6kernelv, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "a = %d\n"
.size .L.str, 8
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z6kernelv"
.size .L__unnamed_1, 11
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__kernelv
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6kernelv
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <iostream>
#define N (500*1024)
__global__ void add(int *a, int *b, int *c){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
c[tid] = a[tid] + b[tid];
tid += blockDim.x + gridDim.x;
}
}
int main( void ){
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
// allocate memory on the GPU
cudaMalloc( (void**) &dev_a, N * sizeof(int));
cudaMalloc( (void**) &dev_b, N * sizeof(int));
cudaMalloc( (void**) &dev_c, N * sizeof(int));
// fill the array a and b on the CPU
for(int i=0; i<N; i++){
a[i] = i;
b[i] = i*i;
}
// Copy the arrays a and b into GPU
cudaMemcpy( dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy( dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);
add<<<128,128>>>(dev_a, dev_b, dev_c);
// Cpy the array back from memory
cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost);
// Verification
bool success = true;
for(int i=0; i<N; i++){
if((a[i] + b[i]) != c[i]){
printf("Error: %d + %d = %d\n", a[i], b[i], c[i]);
success = false;
}
}
if(success) printf("We did it!!!\n");
// Free memory
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
} | code for sm_80
Function : _Z3addPiS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GT.AND P0, PT, R0, 0x7cfff, PT ; /* 0x0007cfff0000780c */
/* 0x000fda0003f04270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0070*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x001fd400000001ff */
/*0080*/ IMAD.WIDE R2, R0, R7, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fc800078e0207 */
/*0090*/ IMAD.WIDE R4, R0.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x0c0fe400078e0207 */
/*00a0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IMAD.WIDE R6, R0, R7, c[0x0][0x170] ; /* 0x00005c0000067625 */
/* 0x000fe200078e0207 */
/*00d0*/ MOV R11, c[0x0][0xc] ; /* 0x00000300000b7a02 */
/* 0x000fc80000000f00 */
/*00e0*/ IADD3 R0, R0, c[0x0][0x0], R11 ; /* 0x0000000000007a10 */
/* 0x000fc80007ffe00b */
/*00f0*/ ISETP.GE.AND P0, PT, R0, 0x7d000, PT ; /* 0x0007d0000000780c */
/* 0x000fe40003f06270 */
/*0100*/ IADD3 R9, R4, R3, RZ ; /* 0x0000000304097210 */
/* 0x004fca0007ffe0ff */
/*0110*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x0001ec000c101904 */
/*0120*/ @!P0 BRA 0x70 ; /* 0xffffff4000008947 */
/* 0x000fea000383ffff */
/*0130*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0140*/ BRA 0x140; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <iostream>
#define N (500*1024)
__global__ void add(int *a, int *b, int *c){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
c[tid] = a[tid] + b[tid];
tid += blockDim.x + gridDim.x;
}
}
int main( void ){
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
// allocate memory on the GPU
cudaMalloc( (void**) &dev_a, N * sizeof(int));
cudaMalloc( (void**) &dev_b, N * sizeof(int));
cudaMalloc( (void**) &dev_c, N * sizeof(int));
// fill the array a and b on the CPU
for(int i=0; i<N; i++){
a[i] = i;
b[i] = i*i;
}
// Copy the arrays a and b into GPU
cudaMemcpy( dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy( dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);
add<<<128,128>>>(dev_a, dev_b, dev_c);
// Cpy the array back from memory
cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost);
// Verification
bool success = true;
for(int i=0; i<N; i++){
if((a[i] + b[i]) != c[i]){
printf("Error: %d + %d = %d\n", a[i], b[i], c[i]);
success = false;
}
}
if(success) printf("We did it!!!\n");
// Free memory
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
} | .file "tmpxft_00116585_00000000-6_vectorAdd.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3672:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3672:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z26__device_stub__Z3addPiS_S_PiS_S_
.type _Z26__device_stub__Z3addPiS_S_PiS_S_, @function
_Z26__device_stub__Z3addPiS_S_PiS_S_:
.LFB3694:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3addPiS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3694:
.size _Z26__device_stub__Z3addPiS_S_PiS_S_, .-_Z26__device_stub__Z3addPiS_S_PiS_S_
.globl _Z3addPiS_S_
.type _Z3addPiS_S_, @function
_Z3addPiS_S_:
.LFB3695:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z26__device_stub__Z3addPiS_S_PiS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3695:
.size _Z3addPiS_S_, .-_Z3addPiS_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Error: %d + %d = %d\n"
.LC1:
.string "We did it!!!\n"
.text
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
leaq -6144000(%rsp), %r11
.cfi_def_cfa 11, 6144024
.LPSRL0:
subq $4096, %rsp
orq $0, (%rsp)
cmpq %r11, %rsp
jne .LPSRL0
.cfi_def_cfa_register 7
subq $72, %rsp
.cfi_def_cfa_offset 6144096
movq %fs:40, %rax
movq %rax, 6144056(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
movl $2048000, %esi
call cudaMalloc@PLT
leaq 8(%rsp), %rdi
movl $2048000, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $2048000, %esi
call cudaMalloc@PLT
movl $0, %eax
.L12:
movl %eax, 48(%rsp,%rax,4)
movl %eax, %edx
imull %eax, %edx
movl %edx, 2048048(%rsp,%rax,4)
addq $1, %rax
cmpq $512000, %rax
jne .L12
leaq 48(%rsp), %rsi
movl $1, %ecx
movl $2048000, %edx
movq (%rsp), %rdi
call cudaMemcpy@PLT
leaq 2048048(%rsp), %rsi
movl $1, %ecx
movl $2048000, %edx
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $128, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $128, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L21
.L13:
leaq 4096048(%rsp), %rdi
movl $2, %ecx
movl $2048000, %edx
movq 16(%rsp), %rsi
call cudaMemcpy@PLT
movl $0, %ebx
movl $1, %esi
leaq .LC0(%rip), %rbp
jmp .L15
.L21:
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movq (%rsp), %rdi
call _Z26__device_stub__Z3addPiS_S_PiS_S_
jmp .L13
.L14:
addq $4, %rbx
cmpq $2048000, %rbx
je .L22
.L15:
movl 48(%rsp,%rbx), %edx
movl 2048048(%rsp,%rbx), %ecx
movl 4096048(%rsp,%rbx), %r8d
leal (%rdx,%rcx), %eax
cmpl %r8d, %eax
je .L14
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %esi
jmp .L14
.L22:
testb %sil, %sil
jne .L23
.L16:
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 6144056(%rsp), %rax
subq %fs:40, %rax
jne .L24
movl $0, %eax
addq $6144072, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L16
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size main, .-main
.section .rodata.str1.1
.LC2:
.string "_Z3addPiS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z3addPiS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <iostream>
#define N (500*1024)
__global__ void add(int *a, int *b, int *c){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
c[tid] = a[tid] + b[tid];
tid += blockDim.x + gridDim.x;
}
}
int main( void ){
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
// allocate memory on the GPU
cudaMalloc( (void**) &dev_a, N * sizeof(int));
cudaMalloc( (void**) &dev_b, N * sizeof(int));
cudaMalloc( (void**) &dev_c, N * sizeof(int));
// fill the array a and b on the CPU
for(int i=0; i<N; i++){
a[i] = i;
b[i] = i*i;
}
// Copy the arrays a and b into GPU
cudaMemcpy( dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy( dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);
add<<<128,128>>>(dev_a, dev_b, dev_c);
// Cpy the array back from memory
cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost);
// Verification
bool success = true;
for(int i=0; i<N; i++){
if((a[i] + b[i]) != c[i]){
printf("Error: %d + %d = %d\n", a[i], b[i], c[i]);
success = false;
}
}
if(success) printf("We did it!!!\n");
// Free memory
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
} | #include <hip/hip_runtime.h>
#include <iostream>
#define N (500*1024)
__global__ void add(int *a, int *b, int *c){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
c[tid] = a[tid] + b[tid];
tid += blockDim.x + gridDim.x;
}
}
int main( void ){
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
// allocate memory on the GPU
hipMalloc( (void**) &dev_a, N * sizeof(int));
hipMalloc( (void**) &dev_b, N * sizeof(int));
hipMalloc( (void**) &dev_c, N * sizeof(int));
// fill the array a and b on the CPU
for(int i=0; i<N; i++){
a[i] = i;
b[i] = i*i;
}
// Copy the arrays a and b into GPU
hipMemcpy( dev_a, a, N * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy( dev_b, b, N * sizeof(int), hipMemcpyHostToDevice);
add<<<128,128>>>(dev_a, dev_b, dev_c);
// Cpy the array back from memory
hipMemcpy(c, dev_c, N * sizeof(int), hipMemcpyDeviceToHost);
// Verification
bool success = true;
for(int i=0; i<N; i++){
if((a[i] + b[i]) != c[i]){
printf("Error: %d + %d = %d\n", a[i], b[i], c[i]);
success = false;
}
}
if(success) printf("We did it!!!\n");
// Free memory
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <iostream>
#define N (500*1024)
__global__ void add(int *a, int *b, int *c){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
c[tid] = a[tid] + b[tid];
tid += blockDim.x + gridDim.x;
}
}
int main( void ){
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
// allocate memory on the GPU
hipMalloc( (void**) &dev_a, N * sizeof(int));
hipMalloc( (void**) &dev_b, N * sizeof(int));
hipMalloc( (void**) &dev_c, N * sizeof(int));
// fill the array a and b on the CPU
for(int i=0; i<N; i++){
a[i] = i;
b[i] = i*i;
}
// Copy the arrays a and b into GPU
hipMemcpy( dev_a, a, N * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy( dev_b, b, N * sizeof(int), hipMemcpyHostToDevice);
add<<<128,128>>>(dev_a, dev_b, dev_c);
// Cpy the array back from memory
hipMemcpy(c, dev_c, N * sizeof(int), hipMemcpyDeviceToHost);
// Verification
bool success = true;
for(int i=0; i<N; i++){
if((a[i] + b[i]) != c[i]){
printf("Error: %d + %d = %d\n", a[i], b[i], c[i]);
success = false;
}
}
if(success) printf("We did it!!!\n");
// Free memory
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3addPiS_S_
.globl _Z3addPiS_S_
.p2align 8
.type _Z3addPiS_S_,@function
_Z3addPiS_S_:
s_load_b32 s4, s[0:1], 0x24
s_add_u32 s2, s0, 24
s_addc_u32 s3, s1, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s8, s4, 0xffff
s_mov_b32 s4, exec_lo
v_mad_u64_u32 v[1:2], null, s15, s8, v[0:1]
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e32 0x7d000, v1
s_cbranch_execz .LBB0_3
s_load_b32 s9, s[2:3], 0x0
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[2:3], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_add_i32 s1, s9, s8
s_mov_b32 s8, 0
.p2align 6
.LBB0_2:
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[2:3], 2, v[1:2]
v_add_nc_u32_e32 v1, s1, v1
v_add_co_u32 v4, vcc_lo, s4, v2
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v3, vcc_lo
v_add_co_u32 v6, vcc_lo, s6, v2
v_add_co_ci_u32_e32 v7, vcc_lo, s7, v3, vcc_lo
v_cmp_lt_i32_e32 vcc_lo, 0x7cfff, v1
global_load_b32 v0, v[4:5], off
global_load_b32 v4, v[6:7], off
v_add_co_u32 v2, s0, s2, v2
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v3, s0, s3, v3, s0
s_or_b32 s8, vcc_lo, s8
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v0, v4, v0
global_store_b32 v[2:3], v0, off
s_and_not1_b32 exec_lo, exec_lo, s8
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3addPiS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3addPiS_S_, .Lfunc_end0-_Z3addPiS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3addPiS_S_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z3addPiS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <iostream>
#define N (500*1024)
__global__ void add(int *a, int *b, int *c){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
c[tid] = a[tid] + b[tid];
tid += blockDim.x + gridDim.x;
}
}
int main( void ){
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
// allocate memory on the GPU
hipMalloc( (void**) &dev_a, N * sizeof(int));
hipMalloc( (void**) &dev_b, N * sizeof(int));
hipMalloc( (void**) &dev_c, N * sizeof(int));
// fill the array a and b on the CPU
for(int i=0; i<N; i++){
a[i] = i;
b[i] = i*i;
}
// Copy the arrays a and b into GPU
hipMemcpy( dev_a, a, N * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy( dev_b, b, N * sizeof(int), hipMemcpyHostToDevice);
add<<<128,128>>>(dev_a, dev_b, dev_c);
// Cpy the array back from memory
hipMemcpy(c, dev_c, N * sizeof(int), hipMemcpyDeviceToHost);
// Verification
bool success = true;
for(int i=0; i<N; i++){
if((a[i] + b[i]) != c[i]){
printf("Error: %d + %d = %d\n", a[i], b[i], c[i]);
success = false;
}
}
if(success) printf("We did it!!!\n");
// Free memory
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return 0;
} | .text
.file "vectorAdd.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z18__device_stub__addPiS_S_ # -- Begin function _Z18__device_stub__addPiS_S_
.p2align 4, 0x90
.type _Z18__device_stub__addPiS_S_,@function
_Z18__device_stub__addPiS_S_: # @_Z18__device_stub__addPiS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z3addPiS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z18__device_stub__addPiS_S_, .Lfunc_end0-_Z18__device_stub__addPiS_S_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $6144104, %rsp # imm = 0x5DC068
.cfi_def_cfa_offset 6144128
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
leaq 16(%rsp), %rdi
movl $2048000, %esi # imm = 0x1F4000
callq hipMalloc
leaq 8(%rsp), %rdi
movl $2048000, %esi # imm = 0x1F4000
callq hipMalloc
movq %rsp, %rdi
movl $2048000, %esi # imm = 0x1F4000
callq hipMalloc
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movl %eax, 4096096(%rsp,%rax,4)
movl %eax, %ecx
imull %eax, %ecx
movl %ecx, 2048096(%rsp,%rax,4)
incq %rax
cmpq $512000, %rax # imm = 0x7D000
jne .LBB1_1
# %bb.2:
movq 16(%rsp), %rdi
leaq 4096096(%rsp), %rsi
movl $2048000, %edx # imm = 0x1F4000
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
leaq 2048096(%rsp), %rsi
movl $2048000, %edx # imm = 0x1F4000
movl $1, %ecx
callq hipMemcpy
movabsq $4294967424, %rdi # imm = 0x100000080
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq (%rsp), %rdx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movq %rdx, 72(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z3addPiS_S_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
movq (%rsp), %rsi
leaq 96(%rsp), %rdi
movl $2048000, %edx # imm = 0x1F4000
movl $2, %ecx
callq hipMemcpy
movb $1, %bl
xorl %r14d, %r14d
jmp .LBB1_5
.p2align 4, 0x90
.LBB1_7: # in Loop: Header=BB1_5 Depth=1
incq %r14
cmpq $512000, %r14 # imm = 0x7D000
je .LBB1_8
.LBB1_5: # =>This Inner Loop Header: Depth=1
movl 4096096(%rsp,%r14,4), %esi
movl 2048096(%rsp,%r14,4), %edx
leal (%rdx,%rsi), %eax
movl 96(%rsp,%r14,4), %ecx
cmpl %ecx, %eax
je .LBB1_7
# %bb.6: # in Loop: Header=BB1_5 Depth=1
xorl %ebx, %ebx
movl $.L.str, %edi
# kill: def $esi killed $esi killed $rsi
# kill: def $edx killed $edx killed $rdx
xorl %eax, %eax
callq printf
jmp .LBB1_7
.LBB1_8:
testb $1, %bl
je .LBB1_10
# %bb.9:
movl $.Lstr, %edi
callq puts@PLT
.LBB1_10:
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $6144104, %rsp # imm = 0x5DC068
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3addPiS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3addPiS_S_,@object # @_Z3addPiS_S_
.section .rodata,"a",@progbits
.globl _Z3addPiS_S_
.p2align 3, 0x0
_Z3addPiS_S_:
.quad _Z18__device_stub__addPiS_S_
.size _Z3addPiS_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Error: %d + %d = %d\n"
.size .L.str, 21
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3addPiS_S_"
.size .L__unnamed_1, 13
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "We did it!!!"
.size .Lstr, 13
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__addPiS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3addPiS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z3addPiS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GT.AND P0, PT, R0, 0x7cfff, PT ; /* 0x0007cfff0000780c */
/* 0x000fda0003f04270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0070*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x001fd400000001ff */
/*0080*/ IMAD.WIDE R2, R0, R7, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fc800078e0207 */
/*0090*/ IMAD.WIDE R4, R0.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x0c0fe400078e0207 */
/*00a0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IMAD.WIDE R6, R0, R7, c[0x0][0x170] ; /* 0x00005c0000067625 */
/* 0x000fe200078e0207 */
/*00d0*/ MOV R11, c[0x0][0xc] ; /* 0x00000300000b7a02 */
/* 0x000fc80000000f00 */
/*00e0*/ IADD3 R0, R0, c[0x0][0x0], R11 ; /* 0x0000000000007a10 */
/* 0x000fc80007ffe00b */
/*00f0*/ ISETP.GE.AND P0, PT, R0, 0x7d000, PT ; /* 0x0007d0000000780c */
/* 0x000fe40003f06270 */
/*0100*/ IADD3 R9, R4, R3, RZ ; /* 0x0000000304097210 */
/* 0x004fca0007ffe0ff */
/*0110*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x0001ec000c101904 */
/*0120*/ @!P0 BRA 0x70 ; /* 0xffffff4000008947 */
/* 0x000fea000383ffff */
/*0130*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0140*/ BRA 0x140; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3addPiS_S_
.globl _Z3addPiS_S_
.p2align 8
.type _Z3addPiS_S_,@function
_Z3addPiS_S_:
s_load_b32 s4, s[0:1], 0x24
s_add_u32 s2, s0, 24
s_addc_u32 s3, s1, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s8, s4, 0xffff
s_mov_b32 s4, exec_lo
v_mad_u64_u32 v[1:2], null, s15, s8, v[0:1]
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e32 0x7d000, v1
s_cbranch_execz .LBB0_3
s_load_b32 s9, s[2:3], 0x0
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[2:3], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_add_i32 s1, s9, s8
s_mov_b32 s8, 0
.p2align 6
.LBB0_2:
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[2:3], 2, v[1:2]
v_add_nc_u32_e32 v1, s1, v1
v_add_co_u32 v4, vcc_lo, s4, v2
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v3, vcc_lo
v_add_co_u32 v6, vcc_lo, s6, v2
v_add_co_ci_u32_e32 v7, vcc_lo, s7, v3, vcc_lo
v_cmp_lt_i32_e32 vcc_lo, 0x7cfff, v1
global_load_b32 v0, v[4:5], off
global_load_b32 v4, v[6:7], off
v_add_co_u32 v2, s0, s2, v2
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v3, s0, s3, v3, s0
s_or_b32 s8, vcc_lo, s8
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v0, v4, v0
global_store_b32 v[2:3], v0, off
s_and_not1_b32 exec_lo, exec_lo, s8
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3addPiS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3addPiS_S_, .Lfunc_end0-_Z3addPiS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3addPiS_S_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z3addPiS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00116585_00000000-6_vectorAdd.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
# Atexit hook installed by __sti____cudaRegisterAll: unregisters the
# embedded CUDA fat binary using the handle saved at startup.
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3672:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
# Handle written by __cudaRegisterFatBinary during static init.
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3672:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z26__device_stub__Z3addPiS_S_PiS_S_
.type _Z26__device_stub__Z3addPiS_S_PiS_S_, @function
_Z26__device_stub__Z3addPiS_S_PiS_S_:
.LFB3694:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3addPiS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3694:
.size _Z26__device_stub__Z3addPiS_S_PiS_S_, .-_Z26__device_stub__Z3addPiS_S_PiS_S_
# Host-side symbol for the kernel add(int*, int*, int*): a thin
# trampoline that forwards to the device stub, which pops the launch
# configuration and calls cudaLaunchKernel.
.globl _Z3addPiS_S_
.type _Z3addPiS_S_, @function
_Z3addPiS_S_:
.LFB3695:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z26__device_stub__Z3addPiS_S_PiS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3695:
.size _Z3addPiS_S_, .-_Z3addPiS_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Error: %d + %d = %d\n"
.LC1:
.string "We did it!!!\n"
.text
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
leaq -6144000(%rsp), %r11
.cfi_def_cfa 11, 6144024
.LPSRL0:
subq $4096, %rsp
orq $0, (%rsp)
cmpq %r11, %rsp
jne .LPSRL0
.cfi_def_cfa_register 7
subq $72, %rsp
.cfi_def_cfa_offset 6144096
movq %fs:40, %rax
movq %rax, 6144056(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
movl $2048000, %esi
call cudaMalloc@PLT
leaq 8(%rsp), %rdi
movl $2048000, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $2048000, %esi
call cudaMalloc@PLT
movl $0, %eax
.L12:
movl %eax, 48(%rsp,%rax,4)
movl %eax, %edx
imull %eax, %edx
movl %edx, 2048048(%rsp,%rax,4)
addq $1, %rax
cmpq $512000, %rax
jne .L12
leaq 48(%rsp), %rsi
movl $1, %ecx
movl $2048000, %edx
movq (%rsp), %rdi
call cudaMemcpy@PLT
leaq 2048048(%rsp), %rsi
movl $1, %ecx
movl $2048000, %edx
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $128, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $128, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L21
.L13:
leaq 4096048(%rsp), %rdi
movl $2, %ecx
movl $2048000, %edx
movq 16(%rsp), %rsi
call cudaMemcpy@PLT
movl $0, %ebx
movl $1, %esi
leaq .LC0(%rip), %rbp
jmp .L15
.L21:
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movq (%rsp), %rdi
call _Z26__device_stub__Z3addPiS_S_PiS_S_
jmp .L13
.L14:
addq $4, %rbx
cmpq $2048000, %rbx
je .L22
.L15:
movl 48(%rsp,%rbx), %edx
movl 2048048(%rsp,%rbx), %ecx
movl 4096048(%rsp,%rbx), %r8d
leal (%rdx,%rcx), %eax
cmpl %r8d, %eax
je .L14
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %esi
jmp .L14
.L22:
testb %sil, %sil
jne .L23
.L16:
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 6144056(%rsp), %rax
subq %fs:40, %rax
jne .L24
movl $0, %eax
addq $6144072, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L16
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size main, .-main
.section .rodata.str1.1
.LC2:
.string "_Z3addPiS_S_"
.text
# Static initializer (referenced from .init_array): registers the embedded
# fat binary and the _Z3addPiS_S_ kernel with the CUDA runtime, then
# installs the matching unregister hook via atexit.
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
# Save the handle for the atexit unregister hook.
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
# Four zeroed stack args — NOTE(review): presumably the tid/bid/bDim/gDim
# pointer slots of __cudaRegisterFunction; confirm against the CUDA CRT ABI.
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
# .LC2 holds the mangled kernel name "_Z3addPiS_S_".
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z3addPiS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "vectorAdd.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z18__device_stub__addPiS_S_ # -- Begin function _Z18__device_stub__addPiS_S_
.p2align 4, 0x90
.type _Z18__device_stub__addPiS_S_,@function
_Z18__device_stub__addPiS_S_: # @_Z18__device_stub__addPiS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z3addPiS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z18__device_stub__addPiS_S_, .Lfunc_end0-_Z18__device_stub__addPiS_S_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $6144104, %rsp # imm = 0x5DC068
.cfi_def_cfa_offset 6144128
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
leaq 16(%rsp), %rdi
movl $2048000, %esi # imm = 0x1F4000
callq hipMalloc
leaq 8(%rsp), %rdi
movl $2048000, %esi # imm = 0x1F4000
callq hipMalloc
movq %rsp, %rdi
movl $2048000, %esi # imm = 0x1F4000
callq hipMalloc
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movl %eax, 4096096(%rsp,%rax,4)
movl %eax, %ecx
imull %eax, %ecx
movl %ecx, 2048096(%rsp,%rax,4)
incq %rax
cmpq $512000, %rax # imm = 0x7D000
jne .LBB1_1
# %bb.2:
movq 16(%rsp), %rdi
leaq 4096096(%rsp), %rsi
movl $2048000, %edx # imm = 0x1F4000
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
leaq 2048096(%rsp), %rsi
movl $2048000, %edx # imm = 0x1F4000
movl $1, %ecx
callq hipMemcpy
movabsq $4294967424, %rdi # imm = 0x100000080
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq (%rsp), %rdx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movq %rdx, 72(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z3addPiS_S_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
movq (%rsp), %rsi
leaq 96(%rsp), %rdi
movl $2048000, %edx # imm = 0x1F4000
movl $2, %ecx
callq hipMemcpy
movb $1, %bl
xorl %r14d, %r14d
jmp .LBB1_5
.p2align 4, 0x90
.LBB1_7: # in Loop: Header=BB1_5 Depth=1
incq %r14
cmpq $512000, %r14 # imm = 0x7D000
je .LBB1_8
.LBB1_5: # =>This Inner Loop Header: Depth=1
movl 4096096(%rsp,%r14,4), %esi
movl 2048096(%rsp,%r14,4), %edx
leal (%rdx,%rsi), %eax
movl 96(%rsp,%r14,4), %ecx
cmpl %ecx, %eax
je .LBB1_7
# %bb.6: # in Loop: Header=BB1_5 Depth=1
xorl %ebx, %ebx
movl $.L.str, %edi
# kill: def $esi killed $esi killed $rsi
# kill: def $edx killed $edx killed $rdx
xorl %eax, %eax
callq printf
jmp .LBB1_7
.LBB1_8:
testb $1, %bl
je .LBB1_10
# %bb.9:
movl $.Lstr, %edi
callq puts@PLT
.LBB1_10:
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $6144104, %rsp # imm = 0x5DC068
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
# HIP module constructor (referenced from .init_array): lazily registers
# the fat binary (guarded by a null check on the cached handle), registers
# the _Z3addPiS_S_ kernel, and tail-calls atexit(__hip_module_dtor).
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
# Skip registration if __hip_gpubin_handle is already set.
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
# Zero 32 bytes of stack-passed pointer arguments.
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3addPiS_S_, %esi
# .L__unnamed_1 is the mangled kernel name string.
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
# HIP module destructor (installed via atexit by __hip_module_ctor):
# unregisters the fat binary if it was registered and clears the handle.
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
# Nothing to do if the module was never registered.
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
# Clear the handle so a second invocation is a no-op.
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3addPiS_S_,@object # @_Z3addPiS_S_
.section .rodata,"a",@progbits
.globl _Z3addPiS_S_
.p2align 3, 0x0
_Z3addPiS_S_:
.quad _Z18__device_stub__addPiS_S_
.size _Z3addPiS_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Error: %d + %d = %d\n"
.size .L.str, 21
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3addPiS_S_"
.size .L__unnamed_1, 13
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "We did it!!!"
.size .Lstr, 13
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__addPiS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3addPiS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
// Enumerate all CUDA-capable devices and print a summary of each device's
// properties to stdout. Returns 0 on success, 1 if the device-count query
// fails. Blocks on getchar() before exiting (keeps console windows open).
int main()
{
    cudaDeviceProp prop;
    int count;
    int i;

    // count receives the number of CUDA-capable GPUs.
    if (cudaGetDeviceCount(&count) != cudaSuccess) {
        printf("\ncudaGetDeviceCount failed.");
        return 1;
    }
    for (i = 0; i < count; i++) {
        if (cudaGetDeviceProperties(&prop, i) != cudaSuccess)
            continue;  // skip devices we cannot query
        printf("\n--- General information for device %d ---", i);
        printf("\nName: %s.", prop.name);
        printf("\nunifiedAddressing: %d.", prop.unifiedAddressing);
        printf("\nCompute capability: %d.%d", prop.major, prop.minor);
        printf("\nCompute mode: 0x%x", prop.computeMode);
        printf("\nClock rate: %d", prop.clockRate);
        // BUG FIX: original format string had no conversion specifier,
        // so the argument was silently dropped.
        printf("\nDevice copy overlap: %d", prop.deviceOverlap);
        printf("\nKernel execution timeout: %d", prop.kernelExecTimeoutEnabled);
        printf("\naSync engine count: %d", prop.asyncEngineCount);
        printf("\nConcurrent kernels: %d", prop.concurrentKernels);
        printf("\nCan map host memory: %d", prop.canMapHostMemory);
        printf("\nPCI Bus Device Domain: %d %d %d", prop.pciBusID, prop.pciDeviceID, prop.pciDomainID);
        // BUG FIX: these fields are size_t; "%x" is undefined behavior on
        // LP64 platforms. "%zx" is the matching length modifier.
        printf("\nTotal global memory: 0x%zx", prop.totalGlobalMem);
        printf("\nTotal const memory: 0x%zx", prop.totalConstMem);
        printf("\nTotal shared memory/block: 0x%zx", prop.sharedMemPerBlock);
        printf("\nTotal shared memory/multiprocessor: 0x%zx", prop.sharedMemPerMultiprocessor);
        printf("\nMemory bus width: %d", prop.memoryBusWidth);
        // BUG FIX: label typo ("integated") and missing specifier.
        printf("\nintegrated: %d", prop.integrated);
        // BUG FIX: maxGridSize / maxThreadsDim are int[3]; passing the array
        // to %d printed a pointer value, not the limits. Print all extents.
        printf("\nmaxGridSize: %d x %d x %d", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
        printf("\nmaxThreadsDim: %d x %d x %d", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
        printf("\nmaxThreadsPerBlock: %d ", prop.maxThreadsPerBlock);
        printf("\nmaxThreadsPerMultiProcessor: %d", prop.maxThreadsPerMultiProcessor);
        printf("\nmultiProcessorCount: %d", prop.multiProcessorCount);
    }
    getchar();  // pause before exit
    return 0;
}
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
// Enumerate all CUDA-capable devices and print a summary of each device's
// properties to stdout. Returns 0 on success, 1 if the device-count query
// fails. Blocks on getchar() before exiting (keeps console windows open).
int main()
{
    cudaDeviceProp prop;
    int count;
    int i;

    // count receives the number of CUDA-capable GPUs.
    if (cudaGetDeviceCount(&count) != cudaSuccess) {
        printf("\ncudaGetDeviceCount failed.");
        return 1;
    }
    for (i = 0; i < count; i++) {
        if (cudaGetDeviceProperties(&prop, i) != cudaSuccess)
            continue;  // skip devices we cannot query
        printf("\n--- General information for device %d ---", i);
        printf("\nName: %s.", prop.name);
        printf("\nunifiedAddressing: %d.", prop.unifiedAddressing);
        printf("\nCompute capability: %d.%d", prop.major, prop.minor);
        printf("\nCompute mode: 0x%x", prop.computeMode);
        printf("\nClock rate: %d", prop.clockRate);
        // BUG FIX: original format string had no conversion specifier,
        // so the argument was silently dropped.
        printf("\nDevice copy overlap: %d", prop.deviceOverlap);
        printf("\nKernel execution timeout: %d", prop.kernelExecTimeoutEnabled);
        printf("\naSync engine count: %d", prop.asyncEngineCount);
        printf("\nConcurrent kernels: %d", prop.concurrentKernels);
        printf("\nCan map host memory: %d", prop.canMapHostMemory);
        printf("\nPCI Bus Device Domain: %d %d %d", prop.pciBusID, prop.pciDeviceID, prop.pciDomainID);
        // BUG FIX: these fields are size_t; "%x" is undefined behavior on
        // LP64 platforms. "%zx" is the matching length modifier.
        printf("\nTotal global memory: 0x%zx", prop.totalGlobalMem);
        printf("\nTotal const memory: 0x%zx", prop.totalConstMem);
        printf("\nTotal shared memory/block: 0x%zx", prop.sharedMemPerBlock);
        printf("\nTotal shared memory/multiprocessor: 0x%zx", prop.sharedMemPerMultiprocessor);
        printf("\nMemory bus width: %d", prop.memoryBusWidth);
        // BUG FIX: label typo ("integated") and missing specifier.
        printf("\nintegrated: %d", prop.integrated);
        // BUG FIX: maxGridSize / maxThreadsDim are int[3]; passing the array
        // to %d printed a pointer value, not the limits. Print all extents.
        printf("\nmaxGridSize: %d x %d x %d", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
        printf("\nmaxThreadsDim: %d x %d x %d", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
        printf("\nmaxThreadsPerBlock: %d ", prop.maxThreadsPerBlock);
        printf("\nmaxThreadsPerMultiProcessor: %d", prop.maxThreadsPerMultiProcessor);
        printf("\nmultiProcessorCount: %d", prop.multiProcessorCount);
    }
    getchar();  // pause before exit
    return 0;
}
.text
#APP
#NO_APP
# Atexit hook installed by __sti____cudaRegisterAll: unregisters the
# embedded CUDA fat binary using the handle saved at startup.
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
# Handle written by __cudaRegisterFatBinary during static init.
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "\n--- General information for device %d ---"
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "\nName: %s."
.LC2:
.string "\nunifiedAddressing: %d."
.LC3:
.string "\nCompute capability: %d.%d"
.LC4:
.string "\nCompute mode: 0x%x"
.LC5:
.string "\nClock rate: %d"
.LC6:
.string "\nDevice copy overlap: "
.LC7:
.string "\nKernel execution timeout: %d"
.LC8:
.string "\naSync engine count: %d"
.LC9:
.string "\nConcurrent kernels: %d"
.LC10:
.string "\nCan map host memory: %d"
.section .rodata.str1.8
.align 8
.LC11:
.string "\nPCI Bus Device Domain: %d %d %d"
.section .rodata.str1.1
.LC12:
.string "\nTotal global memory: 0x%x"
.LC13:
.string "\nTotal const memory: 0x%x"
.section .rodata.str1.8
.align 8
.LC14:
.string "\nTotal shared memory/block: 0x%x"
.align 8
.LC15:
.string "\nTotal shared memory/multiprocessor: 0x%x"
.section .rodata.str1.1
.LC16:
.string "\nMemory bus width: %d"
.LC17:
.string "\nintegated: "
.LC18:
.string "\nmaxGridSize: 0%d"
.LC19:
.string "\nmaxThreadsDim: 0x%x"
.LC20:
.string "\nmaxThreadsPerBlock: %d "
.section .rodata.str1.8
.align 8
.LC21:
.string "\nmaxThreadsPerMultiProcessor: %d"
.section .rodata.str1.1
.LC22:
.string "\nmultiProcessorCount: %d"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $1064, %rsp
.cfi_def_cfa_offset 1120
movq %fs:40, %rax
movq %rax, 1048(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rdi
call cudaGetDeviceCount@PLT
cmpl $0, 12(%rsp)
jle .L4
movl $0, %ebx
leaq .LC0(%rip), %r15
leaq .LC1(%rip), %r14
leaq .LC2(%rip), %r13
leaq .LC3(%rip), %r12
.L5:
leaq 16(%rsp), %rbp
movl %ebx, %esi
movq %rbp, %rdi
call cudaGetDeviceProperties_v2@PLT
movl %ebx, %edx
movq %r15, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbp, %rdx
movq %r14, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 620(%rsp), %edx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 380(%rsp), %ecx
movl 376(%rsp), %edx
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 420(%rsp), %edx
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 364(%rsp), %edx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 400(%rsp), %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 408(%rsp), %edx
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 616(%rsp), %edx
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 592(%rsp), %edx
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 416(%rsp), %edx
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 608(%rsp), %r8d
movl 604(%rsp), %ecx
movl 600(%rsp), %edx
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 304(%rsp), %rdx
leaq .LC12(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 368(%rsp), %rdx
leaq .LC13(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 312(%rsp), %rdx
leaq .LC14(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 656(%rsp), %rdx
leaq .LC15(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 628(%rsp), %edx
leaq .LC16(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 412(%rsp), %edx
leaq .LC17(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 352(%rsp), %rdx
leaq .LC18(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 340(%rsp), %rdx
leaq .LC19(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 336(%rsp), %edx
leaq .LC20(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 640(%rsp), %edx
leaq .LC21(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 404(%rsp), %edx
leaq .LC22(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %ebx
cmpl %ebx, 12(%rsp)
jg .L5
.L4:
movq stdin(%rip), %rdi
call getc@PLT
movq 1048(%rsp), %rax
subq %fs:40, %rax
jne .L9
movl $0, %eax
addq $1064, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L9:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
// Enumerate all CUDA-capable devices and print a summary of each device's
// properties to stdout. Returns 0 on success, 1 if the device-count query
// fails. Blocks on getchar() before exiting (keeps console windows open).
int main()
{
    cudaDeviceProp prop;
    int count;
    int i;

    // count receives the number of CUDA-capable GPUs.
    if (cudaGetDeviceCount(&count) != cudaSuccess) {
        printf("\ncudaGetDeviceCount failed.");
        return 1;
    }
    for (i = 0; i < count; i++) {
        if (cudaGetDeviceProperties(&prop, i) != cudaSuccess)
            continue;  // skip devices we cannot query
        printf("\n--- General information for device %d ---", i);
        printf("\nName: %s.", prop.name);
        printf("\nunifiedAddressing: %d.", prop.unifiedAddressing);
        printf("\nCompute capability: %d.%d", prop.major, prop.minor);
        printf("\nCompute mode: 0x%x", prop.computeMode);
        printf("\nClock rate: %d", prop.clockRate);
        // BUG FIX: original format string had no conversion specifier,
        // so the argument was silently dropped.
        printf("\nDevice copy overlap: %d", prop.deviceOverlap);
        printf("\nKernel execution timeout: %d", prop.kernelExecTimeoutEnabled);
        printf("\naSync engine count: %d", prop.asyncEngineCount);
        printf("\nConcurrent kernels: %d", prop.concurrentKernels);
        printf("\nCan map host memory: %d", prop.canMapHostMemory);
        printf("\nPCI Bus Device Domain: %d %d %d", prop.pciBusID, prop.pciDeviceID, prop.pciDomainID);
        // BUG FIX: these fields are size_t; "%x" is undefined behavior on
        // LP64 platforms. "%zx" is the matching length modifier.
        printf("\nTotal global memory: 0x%zx", prop.totalGlobalMem);
        printf("\nTotal const memory: 0x%zx", prop.totalConstMem);
        printf("\nTotal shared memory/block: 0x%zx", prop.sharedMemPerBlock);
        printf("\nTotal shared memory/multiprocessor: 0x%zx", prop.sharedMemPerMultiprocessor);
        printf("\nMemory bus width: %d", prop.memoryBusWidth);
        // BUG FIX: label typo ("integated") and missing specifier.
        printf("\nintegrated: %d", prop.integrated);
        // BUG FIX: maxGridSize / maxThreadsDim are int[3]; passing the array
        // to %d printed a pointer value, not the limits. Print all extents.
        printf("\nmaxGridSize: %d x %d x %d", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
        printf("\nmaxThreadsDim: %d x %d x %d", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
        printf("\nmaxThreadsPerBlock: %d ", prop.maxThreadsPerBlock);
        printf("\nmaxThreadsPerMultiProcessor: %d", prop.maxThreadsPerMultiProcessor);
        printf("\nmultiProcessorCount: %d", prop.multiProcessorCount);
    }
    getchar();  // pause before exit
    return 0;
}
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
int main()
{
hipDeviceProp_t prop;
int dev;
int stat;
int count;
int i;
hipGetDeviceCount(&count); // count is updated with No. of GPU-s.
for (i = 0; i < count; i++) {
hipGetDeviceProperties(&prop, i);
printf("\n--- General information for device %d ---", i);
printf("\nName: %s.", prop.name);
printf("\nunifiedAddressing: %d.", prop.unifiedAddressing);
printf("\nCompute capability: %d.%d", prop.major, prop.minor);
printf("\nCompute mode: 0x%x", prop.computeMode);
printf("\nClock rate: %d", prop.clockRate);
printf("\nDevice copy overlap: ", prop.deviceOverlap);
printf("\nKernel execution timeout: %d", prop.kernelExecTimeoutEnabled);
printf("\naSync engine count: %d", prop.asyncEngineCount);
printf("\nConcurrent kernels: %d", prop.concurrentKernels);
printf("\nCan map host memory: %d", prop.canMapHostMemory);
printf("\nPCI Bus Device Domain: %d %d %d", prop.pciBusID, prop.pciDeviceID, prop.pciDomainID);
printf("\nTotal global memory: 0x%x", prop.totalGlobalMem);
printf("\nTotal const memory: 0x%x", prop.totalConstMem);
printf("\nTotal shared memory/block: 0x%x", prop.sharedMemPerBlock);
printf("\nTotal shared memory/multiprocessor: 0x%x", prop.sharedMemPerMultiprocessor);
printf("\nMemory bus width: %d", prop.memoryBusWidth);
printf("\nintegated: ", prop.integrated);
printf("\nmaxGridSize: 0%d", prop.maxGridSize);
printf("\nmaxThreadsDim: 0x%x", prop.maxThreadsDim);
printf("\nmaxThreadsPerBlock: %d ", prop.maxThreadsPerBlock);
printf("\nmaxThreadsPerMultiProcessor: %d", prop.maxThreadsPerMultiProcessor);
printf("\nmultiProcessorCount: %d", prop.multiProcessorCount);
}
getchar();
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
int main()
{
hipDeviceProp_t prop;
int dev;
int stat;
int count;
int i;
hipGetDeviceCount(&count); // count is updated with No. of GPU-s.
for (i = 0; i < count; i++) {
hipGetDeviceProperties(&prop, i);
printf("\n--- General information for device %d ---", i);
printf("\nName: %s.", prop.name);
printf("\nunifiedAddressing: %d.", prop.unifiedAddressing);
printf("\nCompute capability: %d.%d", prop.major, prop.minor);
printf("\nCompute mode: 0x%x", prop.computeMode);
printf("\nClock rate: %d", prop.clockRate);
printf("\nDevice copy overlap: ", prop.deviceOverlap);
printf("\nKernel execution timeout: %d", prop.kernelExecTimeoutEnabled);
printf("\naSync engine count: %d", prop.asyncEngineCount);
printf("\nConcurrent kernels: %d", prop.concurrentKernels);
printf("\nCan map host memory: %d", prop.canMapHostMemory);
printf("\nPCI Bus Device Domain: %d %d %d", prop.pciBusID, prop.pciDeviceID, prop.pciDomainID);
printf("\nTotal global memory: 0x%x", prop.totalGlobalMem);
printf("\nTotal const memory: 0x%x", prop.totalConstMem);
printf("\nTotal shared memory/block: 0x%x", prop.sharedMemPerBlock);
printf("\nTotal shared memory/multiprocessor: 0x%x", prop.sharedMemPerMultiprocessor);
printf("\nMemory bus width: %d", prop.memoryBusWidth);
printf("\nintegated: ", prop.integrated);
printf("\nmaxGridSize: 0%d", prop.maxGridSize);
printf("\nmaxThreadsDim: 0x%x", prop.maxThreadsDim);
printf("\nmaxThreadsPerBlock: %d ", prop.maxThreadsPerBlock);
printf("\nmaxThreadsPerMultiProcessor: %d", prop.maxThreadsPerMultiProcessor);
printf("\nmultiProcessorCount: %d", prop.multiProcessorCount);
}
getchar();
return 0;
} | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
int main()
{
hipDeviceProp_t prop;
int dev;
int stat;
int count;
int i;
hipGetDeviceCount(&count); // count is updated with No. of GPU-s.
for (i = 0; i < count; i++) {
hipGetDeviceProperties(&prop, i);
printf("\n--- General information for device %d ---", i);
printf("\nName: %s.", prop.name);
printf("\nunifiedAddressing: %d.", prop.unifiedAddressing);
printf("\nCompute capability: %d.%d", prop.major, prop.minor);
printf("\nCompute mode: 0x%x", prop.computeMode);
printf("\nClock rate: %d", prop.clockRate);
printf("\nDevice copy overlap: ", prop.deviceOverlap);
printf("\nKernel execution timeout: %d", prop.kernelExecTimeoutEnabled);
printf("\naSync engine count: %d", prop.asyncEngineCount);
printf("\nConcurrent kernels: %d", prop.concurrentKernels);
printf("\nCan map host memory: %d", prop.canMapHostMemory);
printf("\nPCI Bus Device Domain: %d %d %d", prop.pciBusID, prop.pciDeviceID, prop.pciDomainID);
printf("\nTotal global memory: 0x%x", prop.totalGlobalMem);
printf("\nTotal const memory: 0x%x", prop.totalConstMem);
printf("\nTotal shared memory/block: 0x%x", prop.sharedMemPerBlock);
printf("\nTotal shared memory/multiprocessor: 0x%x", prop.sharedMemPerMultiprocessor);
printf("\nMemory bus width: %d", prop.memoryBusWidth);
printf("\nintegated: ", prop.integrated);
printf("\nmaxGridSize: 0%d", prop.maxGridSize);
printf("\nmaxThreadsDim: 0x%x", prop.maxThreadsDim);
printf("\nmaxThreadsPerBlock: %d ", prop.maxThreadsPerBlock);
printf("\nmaxThreadsPerMultiProcessor: %d", prop.maxThreadsPerMultiProcessor);
printf("\nmultiProcessorCount: %d", prop.multiProcessorCount);
}
getchar();
return 0;
} | .text
.file "kernel.hip"
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $1480, %rsp # imm = 0x5C8
.cfi_def_cfa_offset 1520
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
leaq 4(%rsp), %rdi
callq hipGetDeviceCount
cmpl $0, 4(%rsp)
jle .LBB0_3
# %bb.1: # %.lr.ph
leaq 344(%rsp), %rbx
leaq 332(%rsp), %r14
leaq 8(%rsp), %r15
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB0_2: # =>This Inner Loop Header: Depth=1
movq %r15, %rdi
movl %ebp, %esi
callq hipGetDevicePropertiesR0600
movl $.L.str, %edi
movl %ebp, %esi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movq %r15, %rsi
xorl %eax, %eax
callq printf
movl 612(%rsp), %esi
movl $.L.str.2, %edi
xorl %eax, %eax
callq printf
movl 368(%rsp), %esi
movl 372(%rsp), %edx
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
movl 412(%rsp), %esi
movl $.L.str.4, %edi
xorl %eax, %eax
callq printf
movl 356(%rsp), %esi
movl $.L.str.5, %edi
xorl %eax, %eax
callq printf
movl 392(%rsp), %esi
movl $.L.str.6, %edi
xorl %eax, %eax
callq printf
movl 400(%rsp), %esi
movl $.L.str.7, %edi
xorl %eax, %eax
callq printf
movl 608(%rsp), %esi
movl $.L.str.8, %edi
xorl %eax, %eax
callq printf
movl 584(%rsp), %esi
movl $.L.str.9, %edi
xorl %eax, %eax
callq printf
movl 408(%rsp), %esi
movl $.L.str.10, %edi
xorl %eax, %eax
callq printf
movl 592(%rsp), %esi
movl 596(%rsp), %edx
movl 600(%rsp), %ecx
movl $.L.str.11, %edi
xorl %eax, %eax
callq printf
movq 296(%rsp), %rsi
movl $.L.str.12, %edi
xorl %eax, %eax
callq printf
movq 360(%rsp), %rsi
movl $.L.str.13, %edi
xorl %eax, %eax
callq printf
movq 304(%rsp), %rsi
movl $.L.str.14, %edi
xorl %eax, %eax
callq printf
movq 648(%rsp), %rsi
movl $.L.str.15, %edi
xorl %eax, %eax
callq printf
movl 620(%rsp), %esi
movl $.L.str.16, %edi
xorl %eax, %eax
callq printf
movl 404(%rsp), %esi
movl $.L.str.17, %edi
xorl %eax, %eax
callq printf
movl $.L.str.18, %edi
movq %rbx, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.19, %edi
movq %r14, %rsi
xorl %eax, %eax
callq printf
movl 328(%rsp), %esi
movl $.L.str.20, %edi
xorl %eax, %eax
callq printf
movl 632(%rsp), %esi
movl $.L.str.21, %edi
xorl %eax, %eax
callq printf
movl 396(%rsp), %esi
movl $.L.str.22, %edi
xorl %eax, %eax
callq printf
incl %ebp
cmpl 4(%rsp), %ebp
jl .LBB0_2
.LBB0_3: # %._crit_edge
movq stdin(%rip), %rdi
callq getc
xorl %eax, %eax
addq $1480, %rsp # imm = 0x5C8
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "\n--- General information for device %d ---"
.size .L.str, 43
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "\nName: %s."
.size .L.str.1, 11
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "\nunifiedAddressing: %d."
.size .L.str.2, 24
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "\nCompute capability: %d.%d"
.size .L.str.3, 27
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "\nCompute mode: 0x%x"
.size .L.str.4, 20
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "\nClock rate: %d"
.size .L.str.5, 16
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "\nDevice copy overlap: "
.size .L.str.6, 23
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "\nKernel execution timeout: %d"
.size .L.str.7, 30
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "\naSync engine count: %d"
.size .L.str.8, 24
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "\nConcurrent kernels: %d"
.size .L.str.9, 24
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "\nCan map host memory: %d"
.size .L.str.10, 25
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "\nPCI Bus Device Domain: %d %d %d"
.size .L.str.11, 33
.type .L.str.12,@object # @.str.12
.L.str.12:
.asciz "\nTotal global memory: 0x%x"
.size .L.str.12, 27
.type .L.str.13,@object # @.str.13
.L.str.13:
.asciz "\nTotal const memory: 0x%x"
.size .L.str.13, 26
.type .L.str.14,@object # @.str.14
.L.str.14:
.asciz "\nTotal shared memory/block: 0x%x"
.size .L.str.14, 33
.type .L.str.15,@object # @.str.15
.L.str.15:
.asciz "\nTotal shared memory/multiprocessor: 0x%x"
.size .L.str.15, 42
.type .L.str.16,@object # @.str.16
.L.str.16:
.asciz "\nMemory bus width: %d"
.size .L.str.16, 22
.type .L.str.17,@object # @.str.17
.L.str.17:
.asciz "\nintegated: "
.size .L.str.17, 13
.type .L.str.18,@object # @.str.18
.L.str.18:
.asciz "\nmaxGridSize: 0%d"
.size .L.str.18, 18
.type .L.str.19,@object # @.str.19
.L.str.19:
.asciz "\nmaxThreadsDim: 0x%x"
.size .L.str.19, 21
.type .L.str.20,@object # @.str.20
.L.str.20:
.asciz "\nmaxThreadsPerBlock: %d "
.size .L.str.20, 25
.type .L.str.21,@object # @.str.21
.L.str.21:
.asciz "\nmaxThreadsPerMultiProcessor: %d"
.size .L.str.21, 33
.type .L.str.22,@object # @.str.22
.L.str.22:
.asciz "\nmultiProcessorCount: %d"
.size .L.str.22, 25
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00178a2b_00000000-6_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "\n--- General information for device %d ---"
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "\nName: %s."
.LC2:
.string "\nunifiedAddressing: %d."
.LC3:
.string "\nCompute capability: %d.%d"
.LC4:
.string "\nCompute mode: 0x%x"
.LC5:
.string "\nClock rate: %d"
.LC6:
.string "\nDevice copy overlap: "
.LC7:
.string "\nKernel execution timeout: %d"
.LC8:
.string "\naSync engine count: %d"
.LC9:
.string "\nConcurrent kernels: %d"
.LC10:
.string "\nCan map host memory: %d"
.section .rodata.str1.8
.align 8
.LC11:
.string "\nPCI Bus Device Domain: %d %d %d"
.section .rodata.str1.1
.LC12:
.string "\nTotal global memory: 0x%x"
.LC13:
.string "\nTotal const memory: 0x%x"
.section .rodata.str1.8
.align 8
.LC14:
.string "\nTotal shared memory/block: 0x%x"
.align 8
.LC15:
.string "\nTotal shared memory/multiprocessor: 0x%x"
.section .rodata.str1.1
.LC16:
.string "\nMemory bus width: %d"
.LC17:
.string "\nintegated: "
.LC18:
.string "\nmaxGridSize: 0%d"
.LC19:
.string "\nmaxThreadsDim: 0x%x"
.LC20:
.string "\nmaxThreadsPerBlock: %d "
.section .rodata.str1.8
.align 8
.LC21:
.string "\nmaxThreadsPerMultiProcessor: %d"
.section .rodata.str1.1
.LC22:
.string "\nmultiProcessorCount: %d"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $1064, %rsp
.cfi_def_cfa_offset 1120
movq %fs:40, %rax
movq %rax, 1048(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rdi
call cudaGetDeviceCount@PLT
cmpl $0, 12(%rsp)
jle .L4
movl $0, %ebx
leaq .LC0(%rip), %r15
leaq .LC1(%rip), %r14
leaq .LC2(%rip), %r13
leaq .LC3(%rip), %r12
.L5:
leaq 16(%rsp), %rbp
movl %ebx, %esi
movq %rbp, %rdi
call cudaGetDeviceProperties_v2@PLT
movl %ebx, %edx
movq %r15, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbp, %rdx
movq %r14, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 620(%rsp), %edx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 380(%rsp), %ecx
movl 376(%rsp), %edx
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 420(%rsp), %edx
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 364(%rsp), %edx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 400(%rsp), %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 408(%rsp), %edx
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 616(%rsp), %edx
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 592(%rsp), %edx
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 416(%rsp), %edx
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 608(%rsp), %r8d
movl 604(%rsp), %ecx
movl 600(%rsp), %edx
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 304(%rsp), %rdx
leaq .LC12(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 368(%rsp), %rdx
leaq .LC13(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 312(%rsp), %rdx
leaq .LC14(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 656(%rsp), %rdx
leaq .LC15(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 628(%rsp), %edx
leaq .LC16(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 412(%rsp), %edx
leaq .LC17(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 352(%rsp), %rdx
leaq .LC18(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 340(%rsp), %rdx
leaq .LC19(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 336(%rsp), %edx
leaq .LC20(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 640(%rsp), %edx
leaq .LC21(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 404(%rsp), %edx
leaq .LC22(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %ebx
cmpl %ebx, 12(%rsp)
jg .L5
.L4:
movq stdin(%rip), %rdi
call getc@PLT
movq 1048(%rsp), %rax
subq %fs:40, %rax
jne .L9
movl $0, %eax
addq $1064, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L9:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "kernel.hip"
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $1480, %rsp # imm = 0x5C8
.cfi_def_cfa_offset 1520
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
leaq 4(%rsp), %rdi
callq hipGetDeviceCount
cmpl $0, 4(%rsp)
jle .LBB0_3
# %bb.1: # %.lr.ph
leaq 344(%rsp), %rbx
leaq 332(%rsp), %r14
leaq 8(%rsp), %r15
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB0_2: # =>This Inner Loop Header: Depth=1
movq %r15, %rdi
movl %ebp, %esi
callq hipGetDevicePropertiesR0600
movl $.L.str, %edi
movl %ebp, %esi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movq %r15, %rsi
xorl %eax, %eax
callq printf
movl 612(%rsp), %esi
movl $.L.str.2, %edi
xorl %eax, %eax
callq printf
movl 368(%rsp), %esi
movl 372(%rsp), %edx
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
movl 412(%rsp), %esi
movl $.L.str.4, %edi
xorl %eax, %eax
callq printf
movl 356(%rsp), %esi
movl $.L.str.5, %edi
xorl %eax, %eax
callq printf
movl 392(%rsp), %esi
movl $.L.str.6, %edi
xorl %eax, %eax
callq printf
movl 400(%rsp), %esi
movl $.L.str.7, %edi
xorl %eax, %eax
callq printf
movl 608(%rsp), %esi
movl $.L.str.8, %edi
xorl %eax, %eax
callq printf
movl 584(%rsp), %esi
movl $.L.str.9, %edi
xorl %eax, %eax
callq printf
movl 408(%rsp), %esi
movl $.L.str.10, %edi
xorl %eax, %eax
callq printf
movl 592(%rsp), %esi
movl 596(%rsp), %edx
movl 600(%rsp), %ecx
movl $.L.str.11, %edi
xorl %eax, %eax
callq printf
movq 296(%rsp), %rsi
movl $.L.str.12, %edi
xorl %eax, %eax
callq printf
movq 360(%rsp), %rsi
movl $.L.str.13, %edi
xorl %eax, %eax
callq printf
movq 304(%rsp), %rsi
movl $.L.str.14, %edi
xorl %eax, %eax
callq printf
movq 648(%rsp), %rsi
movl $.L.str.15, %edi
xorl %eax, %eax
callq printf
movl 620(%rsp), %esi
movl $.L.str.16, %edi
xorl %eax, %eax
callq printf
movl 404(%rsp), %esi
movl $.L.str.17, %edi
xorl %eax, %eax
callq printf
movl $.L.str.18, %edi
movq %rbx, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.19, %edi
movq %r14, %rsi
xorl %eax, %eax
callq printf
movl 328(%rsp), %esi
movl $.L.str.20, %edi
xorl %eax, %eax
callq printf
movl 632(%rsp), %esi
movl $.L.str.21, %edi
xorl %eax, %eax
callq printf
movl 396(%rsp), %esi
movl $.L.str.22, %edi
xorl %eax, %eax
callq printf
incl %ebp
cmpl 4(%rsp), %ebp
jl .LBB0_2
.LBB0_3: # %._crit_edge
movq stdin(%rip), %rdi
callq getc
xorl %eax, %eax
addq $1480, %rsp # imm = 0x5C8
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "\n--- General information for device %d ---"
.size .L.str, 43
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "\nName: %s."
.size .L.str.1, 11
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "\nunifiedAddressing: %d."
.size .L.str.2, 24
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "\nCompute capability: %d.%d"
.size .L.str.3, 27
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "\nCompute mode: 0x%x"
.size .L.str.4, 20
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "\nClock rate: %d"
.size .L.str.5, 16
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "\nDevice copy overlap: "
.size .L.str.6, 23
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "\nKernel execution timeout: %d"
.size .L.str.7, 30
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "\naSync engine count: %d"
.size .L.str.8, 24
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "\nConcurrent kernels: %d"
.size .L.str.9, 24
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "\nCan map host memory: %d"
.size .L.str.10, 25
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "\nPCI Bus Device Domain: %d %d %d"
.size .L.str.11, 33
.type .L.str.12,@object # @.str.12
.L.str.12:
.asciz "\nTotal global memory: 0x%x"
.size .L.str.12, 27
.type .L.str.13,@object # @.str.13
.L.str.13:
.asciz "\nTotal const memory: 0x%x"
.size .L.str.13, 26
.type .L.str.14,@object # @.str.14
.L.str.14:
.asciz "\nTotal shared memory/block: 0x%x"
.size .L.str.14, 33
.type .L.str.15,@object # @.str.15
.L.str.15:
.asciz "\nTotal shared memory/multiprocessor: 0x%x"
.size .L.str.15, 42
.type .L.str.16,@object # @.str.16
.L.str.16:
.asciz "\nMemory bus width: %d"
.size .L.str.16, 22
.type .L.str.17,@object # @.str.17
.L.str.17:
.asciz "\nintegated: "
.size .L.str.17, 13
.type .L.str.18,@object # @.str.18
.L.str.18:
.asciz "\nmaxGridSize: 0%d"
.size .L.str.18, 18
.type .L.str.19,@object # @.str.19
.L.str.19:
.asciz "\nmaxThreadsDim: 0x%x"
.size .L.str.19, 21
.type .L.str.20,@object # @.str.20
.L.str.20:
.asciz "\nmaxThreadsPerBlock: %d "
.size .L.str.20, 25
.type .L.str.21,@object # @.str.21
.L.str.21:
.asciz "\nmaxThreadsPerMultiProcessor: %d"
.size .L.str.21, 33
.type .L.str.22,@object # @.str.22
.L.str.22:
.asciz "\nmultiProcessorCount: %d"
.size .L.str.22, 25
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <math.h>
#include <iostream>
#include <inttypes.h>
#include <vector>
#include <fstream> // I/O
//struct DataFrame
struct DataFrame
{
int w;
int h;
float* e;
};
//Thread block size
#define BLOCK_SIZE 16 //number of threads for each block
#define GRID_SIZE 128 //dimension of entire space
//warning GRID_SIZE must be multiple of 16
#define BIN_SIZE 4 //size of a hisogram bin
#define TH 30 //threshold for triggering the histogram
__global__ void GridKernel(const DataFrame, DataFrame, unsigned int*); //aggiungere i parametri
//------------------------------------------------------------
//Histogram algorithm HOST CODE
float CircleFit(const DataFrame data, DataFrame circles)
{
//load data to device memory
DataFrame d_data;
d_data.w = data.w;
d_data.h = data.h;
size_t size = data.w * data.h * sizeof(float);
cudaError_t err = cudaMalloc(&d_data.e, size);
if(err)
printf("CUDA malloc data DataFrame: %s\n",cudaGetErrorString(err));
cudaMemcpy(d_data.e, data.e, size, cudaMemcpyHostToDevice);
//allocate circles
DataFrame d_circles;
d_circles.w = circles.w;
d_circles.h = circles.h;
size = circles.w * circles.h * sizeof(float);
err = cudaMalloc(&d_circles.e, size);
if(err)
printf("CUDA malloc circles DataFrame: %s\n",cudaGetErrorString(err));
//allocate variable for counting circles found
unsigned int *d_counter;
unsigned int h_counter;
cudaMallocManaged(&d_counter, sizeof(unsigned int));
if(err)
printf("CUDA malloc counter variable: %s\n",cudaGetErrorString(err));
// printf("initialized counter : %u\n", *counter);
// Hai assegnato al puntatore l'indirizzo di memoria da utilizzare, ma e' memoria della GPU, non puoi accedere da qui
// ma il valore nella cella puntata non e' inizializzato
cudaMemset(d_counter, 0, sizeof(unsigned int));
cudaMemcpy(&h_counter, d_counter, sizeof(unsigned int), cudaMemcpyDeviceToHost);
printf("initialized counter : %u\n", h_counter);
//
float time;
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
//start time
cudaEventRecord(start);
// Define the geometry
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE/ dimBlock.x, GRID_SIZE/ dimBlock.y);
//warning GRID_SIZE must be multiple of 16
// Invoke kernel
GridKernel<<<dimGrid, dimBlock>>>(d_data, d_circles, d_counter);
err = cudaDeviceSynchronize();
//stop time
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
if(err)
printf("Run kernel: %s\n", cudaGetErrorString(err));
printf("Time: %3.5f ms\n",time);
// Read C from device memory
size = circles.w * circles.h * sizeof(float);
err = cudaMemcpy(circles.e, d_circles.e, size, cudaMemcpyDeviceToHost);
if(err)
printf("Copy circles off of device: %s\n",cudaGetErrorString(err));
cudaMemcpy(&h_counter, d_counter, sizeof(unsigned int), cudaMemcpyDeviceToHost);
printf("counter : %u\n", h_counter);
// Free device memory
cudaFree(d_data.e);
cudaFree(d_circles.e);
cudaFree(d_counter);
return time;
} //END HOST FUNCTION
//---------------------------------------------------------
// thread aware log function
__device__ void log_msg(const char * message)
{
printf("%d.%d.%d.%d-%s", blockIdx.x, blockIdx.y, threadIdx.x, threadIdx.y, message);
}
//Device function to calculate distance
__device__ float Distance2(float x, float y, float xd, float yd)
{
return (x-xd)*(x-xd)+(y-yd)*(y-yd);
}
//---------------------------------------------------------
//Device function to Get Element from data
__device__ float GetElement(const DataFrame D, int row, int col)
{
if ( (row < D.h ) && (col < D.w ) )
return D.e[row * D.w + col];
else
return 0.0;
}
//---------------------------------------------------------------
//Device function to Set Element of data
__device__ void SetElement(DataFrame D, int row, int col, float value)
{
if ( (row < D.h ) && (col < D.w ) )
D.e[row * D.w + col] = value;
}
//---------------------------------------------------------
//Device function to fill histo
// __device__ void HistoFill(int x, int y, DataFrame data,int *h, DataFrame circles, unsigned int *counter)
__device__ void HstFill(int x, int y, DataFrame data,DataFrame circles, unsigned int *counter)
{
const size_t HST_SIZE = GRID_SIZE*GRID_SIZE/BIN_SIZE;
int hst[HST_SIZE] = {0}; // va inizializzato
int i, idx;
// log_id(); printf("HstFill\n");
// atomicAdd(counter,1);
for ( i=0; i<data.h; i++)
{
// float xd, yd, d;
float xd, yd;
int d;
xd = GetElement(data, i, 0);
yd = GetElement(data, i, 1);
d = __float2int_rn( Distance2( __int2float_rn(x), __int2float_rn(y),xd,yd) ) / BIN_SIZE ;
if (d < HST_SIZE) hst[d] +=1; // questo if serve per evitare errori di accesso alla memoria nel caso di valori "strani"
// potrebbero lavorare piu' thread sullo stesso elemento
// va implementato un meccanismo di locking per evitarlo
// in piu' questo blocco va eseguito solo al termine del ciclo for, quando hai ormai analizzato tutti i punti
// altrimenti crei (n - TH) volte lo stesso cerchio
/*
if(hst[bin]>TH)
{
idx = atomicAdd(counter,1); // garantisce che l'incremento avvenga un thread alla volta, quindi idx puo' essere usato solo dal thread in esecuzione
SetElement(circles, idx,0,x);
SetElement(circles, idx,1,y);
// float r2=d;
// SetElement(circles, idx,2,r2);
SetElement(circles, idx,2,d);
}
*/
}
// dopo aver riempito l'array hst, scandisce tutti gli elementi e aggiunge alla lista dei cerchi il cerchio trovato
for ( i=0; i < HST_SIZE; i++)
{
if(hst[i]>TH)
{
idx = atomicAdd(counter,1); // garantisce che l'incremento avvenga un thread alla volta, quindi idx puo' essere usato solo dal thread in esecuzione
//log_msg(""); printf("counter: %d, x %f, y %f, radius: %f\n", idx, __int2float_rn(x), __int2float_rn(y), sqrt( __int2float_rn(i*BIN_SIZE) ) );
SetElement(circles, idx,0, __int2float_rn(x));
SetElement(circles, idx,1, __int2float_rn(y));
// float r2=d;
// SetElement(circles, idx,2,r2);
SetElement(circles, idx,2, sqrt( __int2float_rn(i*BIN_SIZE) ) );
}
}
}
//---------------------------------------------------------
//Histogram algorithm kernel
__global__ void GridKernel(DataFrame data, DataFrame circles,unsigned int *counter)
{
//x and y indexes
int x = blockIdx.x * blockDim.x+ threadIdx.x - (GRID_SIZE / 2);
int y = blockIdx.y * blockDim.y +threadIdx.y - (GRID_SIZE / 2);
// const size_t histo_size = GRID_SIZE*GRID_SIZE/BIN_SIZE;
// int histo[histo_size];
// se con histo non ci fai niente, e' inutile definirla qui e passarla alla funzione
// in piu' il contenuto non e' inizializzato
// HistoFill(x,y, data, histo, circles, counter);
HstFill(x,y, data, circles, counter);
}
//------------------------------------------------------------
//dump dataframe function
void dump(DataFrame m)
{
for (int i = 0; i< m.h; i++) // Loop sulle righe
{
for (int j = 0; j< m.w; j++) // Loop sulle colonne
printf("%3.1f\t", m.e[i*m.w + j]);
printf("\n"); // A capo di fine riga
}
printf("\n");
}
//------------------------------------------------------------
//read floats from file.dat
void readBin(std::vector<float> buf)
{
}
//-------------------------------------------------------------
int main(int argc, char** argv)
{
std::vector<float> buf;
std::ifstream in("file.dat",std::ios::binary); //open the input file
while(!in.eof())
{
float w;
in.read( (char*) &w, sizeof(w));
buf.push_back(w);
}
in.close(); //close the input file .... ormai i dati sono letti, puoi chiudere il file
DataFrame data, circles;
data.h = buf.size()>>1;
data.w = 2;
data.e = (float*) malloc(data.w * data.h * sizeof(float));
circles.h = 15;
circles.w = 3;
circles.e = (float*) malloc(circles.w * circles.h * sizeof(float));
data.e = &buf[0]; //copy imported floats from buf to data .... non stai copiando i dati, se fai cosi' non ti serve la memoria che hai allocato prima
float a = CircleFit(data, circles);
// dump(data);
dump(circles);
// in.close(); //close the input file
return 0;
} | .file "tmpxft_0004840e_00000000-6_reticolo.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB4184:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4184:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z7log_msgPKc
.type _Z7log_msgPKc, @function
_Z7log_msgPKc:
.LFB4164:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE4164:
.size _Z7log_msgPKc, .-_Z7log_msgPKc
.globl _Z9Distance2ffff
.type _Z9Distance2ffff, @function
_Z9Distance2ffff:
.LFB4165:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE4165:
.size _Z9Distance2ffff, .-_Z9Distance2ffff
.globl _Z10GetElement9DataFrameii
.type _Z10GetElement9DataFrameii, @function
_Z10GetElement9DataFrameii:
.LFB4166:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE4166:
.size _Z10GetElement9DataFrameii, .-_Z10GetElement9DataFrameii
.globl _Z10SetElement9DataFrameiif
.type _Z10SetElement9DataFrameiif, @function
_Z10SetElement9DataFrameiif:
.LFB4167:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE4167:
.size _Z10SetElement9DataFrameiif, .-_Z10SetElement9DataFrameiif
.globl _Z7HstFillii9DataFrameS_Pj
.type _Z7HstFillii9DataFrameS_Pj, @function
_Z7HstFillii9DataFrameS_Pj:
.LFB4168:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE4168:
.size _Z7HstFillii9DataFrameS_Pj, .-_Z7HstFillii9DataFrameS_Pj
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%3.1f\t"
.LC1:
.string "\n"
.text
.globl _Z4dump9DataFrame
.type _Z4dump9DataFrame, @function
_Z4dump9DataFrame:
.LFB4169:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movq %rdi, %rax
sarq $32, %rax
movl %eax, 16(%rsp)
movl %edi, 12(%rsp)
testl %eax, %eax
jle .L14
movq %rsi, %r12
movl %edi, 20(%rsp)
movl $0, %r15d
movl $0, %r14d
movslq %edi, %rax
movq %rax, 24(%rsp)
leaq .LC0(%rip), %r13
jmp .L15
.L17:
movslq %r15d, %rbp
leaq 0(,%rbp,4), %rbx
movq 24(%rsp), %rax
addq %rax, %rbp
salq $2, %rbp
.L16:
pxor %xmm0, %xmm0
cvtss2sd (%rbx,%r12), %xmm0
movq %r13, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %rbp, %rbx
jne .L16
.L18:
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %r14d
movl 20(%rsp), %eax
addl %eax, %r15d
movl 16(%rsp), %eax
cmpl %eax, %r14d
je .L14
.L15:
cmpl $0, 12(%rsp)
jg .L17
jmp .L18
.L14:
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4169:
.size _Z4dump9DataFrame, .-_Z4dump9DataFrame
.globl _Z7readBinSt6vectorIfSaIfEE
.type _Z7readBinSt6vectorIfSaIfEE, @function
_Z7readBinSt6vectorIfSaIfEE:
.LFB4170:
.cfi_startproc
endbr64
ret
.cfi_endproc
.LFE4170:
.size _Z7readBinSt6vectorIfSaIfEE, .-_Z7readBinSt6vectorIfSaIfEE
.globl _Z42__device_stub__Z10GridKernel9DataFrameS_PjR9DataFrameS0_Pj
.type _Z42__device_stub__Z10GridKernel9DataFrameS_PjR9DataFrameS0_Pj, @function
_Z42__device_stub__Z10GridKernel9DataFrameS_PjR9DataFrameS0_Pj:
.LFB4206:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
movq %rdi, 80(%rsp)
movq %rsi, 88(%rsp)
leaq 8(%rsp), %rax
movq %rax, 96(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L26
.L22:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L27
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L26:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z10GridKernel9DataFrameS_Pj(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L22
.L27:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4206:
.size _Z42__device_stub__Z10GridKernel9DataFrameS_PjR9DataFrameS0_Pj, .-_Z42__device_stub__Z10GridKernel9DataFrameS_PjR9DataFrameS0_Pj
.globl _Z10GridKernel9DataFrameS_Pj
.type _Z10GridKernel9DataFrameS_Pj, @function
_Z10GridKernel9DataFrameS_Pj:
.LFB4207:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
movq %rdi, 16(%rsp)
movq %rsi, 24(%rsp)
movq %rdx, (%rsp)
movq %rcx, 8(%rsp)
movq %r8, %rdx
movq %rsp, %rsi
leaq 16(%rsp), %rdi
call _Z42__device_stub__Z10GridKernel9DataFrameS_PjR9DataFrameS0_Pj
addq $40, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4207:
.size _Z10GridKernel9DataFrameS_Pj, .-_Z10GridKernel9DataFrameS_Pj
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "CUDA malloc data DataFrame: %s\n"
.align 8
.LC3:
.string "CUDA malloc circles DataFrame: %s\n"
.align 8
.LC4:
.string "CUDA malloc counter variable: %s\n"
.section .rodata.str1.1
.LC5:
.string "initialized counter : %u\n"
.LC6:
.string "Run kernel: %s\n"
.LC7:
.string "Time: %3.5f ms\n"
.section .rodata.str1.8
.align 8
.LC8:
.string "Copy circles off of device: %s\n"
.section .rodata.str1.1
.LC9:
.string "counter : %u\n"
.text
.globl _Z9CircleFit9DataFrameS_
.type _Z9CircleFit9DataFrameS_, @function
_Z9CircleFit9DataFrameS_:
.LFB4163:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $144, %rsp
.cfi_def_cfa_offset 192
movq %rsi, %r13
movq %rdx, %rbx
movq %rcx, %r12
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
movq %rdi, %rax
sarq $32, %rax
movq %rdx, %r14
sarq $32, %r14
movl %edi, 64(%rsp)
movl %eax, 68(%rsp)
imull %eax, %edi
movslq %edi, %rbp
salq $2, %rbp
leaq 72(%rsp), %rdi
movq %rbp, %rsi
call cudaMalloc@PLT
testl %eax, %eax
jne .L40
.L31:
movl $1, %ecx
movq %rbp, %rdx
movq %r13, %rsi
movq 72(%rsp), %rdi
call cudaMemcpy@PLT
movl %ebx, 80(%rsp)
movl %r14d, 84(%rsp)
imull %r14d, %ebx
movslq %ebx, %rbx
salq $2, %rbx
leaq 88(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl %eax, %ebp
testl %eax, %eax
jne .L41
leaq 16(%rsp), %rdi
movl $1, %edx
movl $4, %esi
call cudaMallocManaged@PLT
.L36:
movl $4, %edx
movl $0, %esi
movq 16(%rsp), %rdi
call cudaMemset@PLT
leaq 12(%rsp), %rdi
movl $2, %ecx
movl $4, %edx
movq 16(%rsp), %rsi
call cudaMemcpy@PLT
movl 12(%rsp), %edx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 24(%rsp), %rdi
call cudaEventCreate@PLT
leaq 32(%rsp), %rdi
call cudaEventCreate@PLT
movl $0, %esi
movq 24(%rsp), %rdi
call cudaEventRecord@PLT
movl $8, 52(%rsp)
movl $8, 56(%rsp)
movl $16, 40(%rsp)
movl $16, 44(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 40(%rsp), %rdx
movl $1, %ecx
movq 52(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L42
.L33:
call cudaDeviceSynchronize@PLT
movl %eax, %ebp
movl $0, %esi
movq 32(%rsp), %rdi
call cudaEventRecord@PLT
movq 32(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 112(%rsp), %rdi
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
call cudaEventElapsedTime@PLT
testl %ebp, %ebp
jne .L43
.L34:
pxor %xmm0, %xmm0
cvtss2sd 112(%rsp), %xmm0
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl $2, %ecx
movq %rbx, %rdx
movq 88(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
testl %eax, %eax
jne .L44
.L35:
leaq 12(%rsp), %rdi
movl $2, %ecx
movl $4, %edx
movq 16(%rsp), %rsi
call cudaMemcpy@PLT
movl 12(%rsp), %edx
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 72(%rsp), %rdi
call cudaFree@PLT
movq 88(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movss 112(%rsp), %xmm0
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L45
addq $144, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L40:
.cfi_restore_state
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L31
.L41:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 16(%rsp), %rdi
movl $1, %edx
movl $4, %esi
call cudaMallocManaged@PLT
movl %ebp, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L36
.L42:
movdqa 64(%rsp), %xmm1
movaps %xmm1, 96(%rsp)
movdqa 80(%rsp), %xmm2
movaps %xmm2, 112(%rsp)
leaq 112(%rsp), %rsi
leaq 96(%rsp), %rdi
movq 16(%rsp), %rdx
call _Z42__device_stub__Z10GridKernel9DataFrameS_PjR9DataFrameS0_Pj
jmp .L33
.L43:
movl %ebp, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L34
.L44:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L35
.L45:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4163:
.size _Z9CircleFit9DataFrameS_, .-_Z9CircleFit9DataFrameS_
.section .rodata.str1.1
.LC10:
.string "_Z10GridKernel9DataFrameS_Pj"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB4209:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _Z10GridKernel9DataFrameS_Pj(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4209:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .rodata._ZNSt6vectorIfSaIfEE17_M_realloc_insertIJRKfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_.str1.1,"aMS",@progbits,1
.LC11:
.string "vector::_M_realloc_insert"
.section .text._ZNSt6vectorIfSaIfEE17_M_realloc_insertIJRKfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_,"axG",@progbits,_ZNSt6vectorIfSaIfEE17_M_realloc_insertIJRKfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_,comdat
.align 2
.weak _ZNSt6vectorIfSaIfEE17_M_realloc_insertIJRKfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_
.type _ZNSt6vectorIfSaIfEE17_M_realloc_insertIJRKfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_, @function
_ZNSt6vectorIfSaIfEE17_M_realloc_insertIJRKfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_:
.LFB4729:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $24, %rsp
.cfi_def_cfa_offset 80
movq %rsi, (%rsp)
movq %rdx, 8(%rsp)
movq 8(%rdi), %rbp
movq (%rdi), %r13
movq %rbp, %rax
subq %r13, %rax
sarq $2, %rax
movabsq $2305843009213693951, %rdx
cmpq %rdx, %rax
je .L65
movq %rdi, %rbx
cmpq %r13, %rbp
movl $1, %edx
cmovne %rax, %rdx
addq %rdx, %rax
jc .L51
movabsq $2305843009213693951, %r14
cmpq %r14, %rax
cmovbe %rax, %r14
movq (%rsp), %r15
subq %r13, %r15
movl $0, %r12d
testq %rax, %rax
je .L52
jmp .L59
.L65:
leaq .LC11(%rip), %rdi
call _ZSt20__throw_length_errorPKc@PLT
.L66:
movq %r15, %rdx
movq %r13, %rsi
movq %r12, %rdi
call memmove@PLT
leaq 4(%r12,%r15), %r15
movq (%rsp), %rax
subq %rax, %rbp
testq %rbp, %rbp
jg .L54
addq %rbp, %r15
movq 16(%rbx), %rsi
subq %r13, %rsi
jmp .L58
.L51:
movq (%rsp), %r15
subq %r13, %r15
movabsq $2305843009213693951, %r14
.L59:
leaq 0(,%r14,4), %rdi
call _Znwm@PLT
movq %rax, %r12
.L52:
movq 8(%rsp), %rax
movss (%rax), %xmm0
movss %xmm0, (%r12,%r15)
testq %r15, %r15
jg .L66
leaq 4(%r12,%r15), %r15
movq (%rsp), %rax
subq %rax, %rbp
testq %rbp, %rbp
jle .L56
.L54:
movq %rbp, %rdx
movq (%rsp), %rsi
movq %r15, %rdi
call memcpy@PLT
.L56:
addq %rbp, %r15
testq %r13, %r13
je .L57
movq 16(%rbx), %rsi
subq %r13, %rsi
.L58:
movq %r13, %rdi
call _ZdlPvm@PLT
.L57:
movq %r12, (%rbx)
movq %r15, 8(%rbx)
leaq (%r12,%r14,4), %rax
movq %rax, 16(%rbx)
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4729:
.size _ZNSt6vectorIfSaIfEE17_M_realloc_insertIJRKfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_, .-_ZNSt6vectorIfSaIfEE17_M_realloc_insertIJRKfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_
.section .rodata.str1.1
.LC12:
.string "file.dat"
.text
.globl main
.type main, @function
main:
.LFB4171:
.cfi_startproc
.cfi_personality 0x9b,DW.ref.__gxx_personality_v0
.cfi_lsda 0x1b,.LLSDA4171
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $576, %rsp
.cfi_def_cfa_offset 608
movq %fs:40, %rax
movq %rax, 568(%rsp)
xorl %eax, %eax
movq $0, 16(%rsp)
movq $0, 24(%rsp)
movq $0, 32(%rsp)
leaq 48(%rsp), %rdi
movl $4, %edx
leaq .LC12(%rip), %rsi
.LEHB0:
call _ZNSt14basic_ifstreamIcSt11char_traitsIcEEC1EPKcSt13_Ios_Openmode@PLT
.LEHE0:
testb $2, 336(%rsp)
jne .L68
leaq 12(%rsp), %rbx
jmp .L71
.L81:
movq 24(%rsp), %rsi
cmpq 32(%rsp), %rsi
je .L69
movss 12(%rsp), %xmm0
movss %xmm0, (%rsi)
addq $4, %rsi
movq %rsi, 24(%rsp)
.L70:
testb $2, 336(%rsp)
jne .L68
.L71:
leaq 48(%rsp), %rdi
movl $4, %edx
movq %rbx, %rsi
.LEHB1:
call _ZNSi4readEPcl@PLT
jmp .L81
.L69:
leaq 16(%rsp), %rdi
movq %rbx, %rdx
call _ZNSt6vectorIfSaIfEE17_M_realloc_insertIJRKfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_
jmp .L70
.L68:
leaq 48(%rsp), %rdi
call _ZNSt14basic_ifstreamIcSt11char_traitsIcEE5closeEv@PLT
movq 16(%rsp), %rbp
movq 24(%rsp), %rbx
subq %rbp, %rbx
sarq $2, %rbx
shrq %rbx
movl $180, %edi
call malloc@PLT
movq %rax, %r12
salq $32, %rbx
movq %rbx, %rdi
orq $2, %rdi
movabsq $64424509443, %rbx
movq %rbx, %rdx
movq %rax, %rcx
movq %rbp, %rsi
call _Z9CircleFit9DataFrameS_
movq %rbx, %rdi
movq %r12, %rsi
call _Z4dump9DataFrame
.LEHE1:
leaq 48(%rsp), %rdi
call _ZNSt14basic_ifstreamIcSt11char_traitsIcEED1Ev@PLT
testq %rbp, %rbp
je .L72
movq 32(%rsp), %rsi
subq %rbp, %rsi
movq %rbp, %rdi
call _ZdlPvm@PLT
.L72:
movq 568(%rsp), %rax
subq %fs:40, %rax
jne .L82
movl $0, %eax
addq $576, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L77:
.cfi_restore_state
endbr64
movq %rax, %rbx
leaq 48(%rsp), %rdi
call _ZNSt14basic_ifstreamIcSt11char_traitsIcEED1Ev@PLT
movq 16(%rsp), %rdi
movq 32(%rsp), %rsi
subq %rdi, %rsi
testq %rdi, %rdi
je .L74
call _ZdlPvm@PLT
.L74:
movq 568(%rsp), %rax
subq %fs:40, %rax
je .L75
call __stack_chk_fail@PLT
.L75:
movq %rbx, %rdi
.LEHB2:
call _Unwind_Resume@PLT
.LEHE2:
.L82:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4171:
.globl __gxx_personality_v0
.section .gcc_except_table,"a",@progbits
.LLSDA4171:
.byte 0xff
.byte 0xff
.byte 0x1
.uleb128 .LLSDACSE4171-.LLSDACSB4171
.LLSDACSB4171:
.uleb128 .LEHB0-.LFB4171
.uleb128 .LEHE0-.LEHB0
.uleb128 0
.uleb128 0
.uleb128 .LEHB1-.LFB4171
.uleb128 .LEHE1-.LEHB1
.uleb128 .L77-.LFB4171
.uleb128 0
.uleb128 .LEHB2-.LFB4171
.uleb128 .LEHE2-.LEHB2
.uleb128 0
.uleb128 0
.LLSDACSE4171:
.text
.size main, .-main
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.hidden DW.ref.__gxx_personality_v0
.weak DW.ref.__gxx_personality_v0
.section .data.rel.local.DW.ref.__gxx_personality_v0,"awG",@progbits,DW.ref.__gxx_personality_v0,comdat
.align 8
.type DW.ref.__gxx_personality_v0, @object
.size DW.ref.__gxx_personality_v0, 8
DW.ref.__gxx_personality_v0:
.quad __gxx_personality_v0
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <math.h>
#include <iostream>
#include <inttypes.h>
#include <vector>
#include <fstream> // I/O
//struct DataFrame
struct DataFrame
{
int w;
int h;
float* e;
};
//Thread block size
#define BLOCK_SIZE 16 //number of threads for each block
#define GRID_SIZE 128 //dimension of entire space
//warning GRID_SIZE must be multiple of 16
#define BIN_SIZE 4 //size of a hisogram bin
#define TH 30 //threshold for triggering the histogram
__global__ void GridKernel(const DataFrame, DataFrame, unsigned int*); //aggiungere i parametri
//------------------------------------------------------------
//Histogram algorithm HOST CODE
float CircleFit(const DataFrame data, DataFrame circles)
{
//load data to device memory
DataFrame d_data;
d_data.w = data.w;
d_data.h = data.h;
size_t size = data.w * data.h * sizeof(float);
cudaError_t err = cudaMalloc(&d_data.e, size);
if(err)
printf("CUDA malloc data DataFrame: %s\n",cudaGetErrorString(err));
cudaMemcpy(d_data.e, data.e, size, cudaMemcpyHostToDevice);
//allocate circles
DataFrame d_circles;
d_circles.w = circles.w;
d_circles.h = circles.h;
size = circles.w * circles.h * sizeof(float);
err = cudaMalloc(&d_circles.e, size);
if(err)
printf("CUDA malloc circles DataFrame: %s\n",cudaGetErrorString(err));
//allocate variable for counting circles found
unsigned int *d_counter;
unsigned int h_counter;
cudaMallocManaged(&d_counter, sizeof(unsigned int));
if(err)
printf("CUDA malloc counter variable: %s\n",cudaGetErrorString(err));
// printf("initialized counter : %u\n", *counter);
// Hai assegnato al puntatore l'indirizzo di memoria da utilizzare, ma e' memoria della GPU, non puoi accedere da qui
// ma il valore nella cella puntata non e' inizializzato
cudaMemset(d_counter, 0, sizeof(unsigned int));
cudaMemcpy(&h_counter, d_counter, sizeof(unsigned int), cudaMemcpyDeviceToHost);
printf("initialized counter : %u\n", h_counter);
//
float time;
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
//start time
cudaEventRecord(start);
// Define the geometry
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE/ dimBlock.x, GRID_SIZE/ dimBlock.y);
//warning GRID_SIZE must be multiple of 16
// Invoke kernel
GridKernel<<<dimGrid, dimBlock>>>(d_data, d_circles, d_counter);
err = cudaDeviceSynchronize();
//stop time
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
if(err)
printf("Run kernel: %s\n", cudaGetErrorString(err));
printf("Time: %3.5f ms\n",time);
// Read C from device memory
size = circles.w * circles.h * sizeof(float);
err = cudaMemcpy(circles.e, d_circles.e, size, cudaMemcpyDeviceToHost);
if(err)
printf("Copy circles off of device: %s\n",cudaGetErrorString(err));
cudaMemcpy(&h_counter, d_counter, sizeof(unsigned int), cudaMemcpyDeviceToHost);
printf("counter : %u\n", h_counter);
// Free device memory
cudaFree(d_data.e);
cudaFree(d_circles.e);
cudaFree(d_counter);
return time;
} //END HOST FUNCTION
//---------------------------------------------------------
// thread aware log function
__device__ void log_msg(const char * message)
{
printf("%d.%d.%d.%d-%s", blockIdx.x, blockIdx.y, threadIdx.x, threadIdx.y, message);
}
//Device function to calculate distance
__device__ float Distance2(float x, float y, float xd, float yd)
{
return (x-xd)*(x-xd)+(y-yd)*(y-yd);
}
//---------------------------------------------------------
//Device function to Get Element from data
__device__ float GetElement(const DataFrame D, int row, int col)
{
if ( (row < D.h ) && (col < D.w ) )
return D.e[row * D.w + col];
else
return 0.0;
}
//---------------------------------------------------------------
//Device function to Set Element of data
__device__ void SetElement(DataFrame D, int row, int col, float value)
{
if ( (row < D.h ) && (col < D.w ) )
D.e[row * D.w + col] = value;
}
//---------------------------------------------------------
//Device function to fill histo
// __device__ void HistoFill(int x, int y, DataFrame data,int *h, DataFrame circles, unsigned int *counter)
__device__ void HstFill(int x, int y, DataFrame data,DataFrame circles, unsigned int *counter)
{
const size_t HST_SIZE = GRID_SIZE*GRID_SIZE/BIN_SIZE;
int hst[HST_SIZE] = {0}; // va inizializzato
int i, idx;
// log_id(); printf("HstFill\n");
// atomicAdd(counter,1);
for ( i=0; i<data.h; i++)
{
// float xd, yd, d;
float xd, yd;
int d;
xd = GetElement(data, i, 0);
yd = GetElement(data, i, 1);
d = __float2int_rn( Distance2( __int2float_rn(x), __int2float_rn(y),xd,yd) ) / BIN_SIZE ;
if (d < HST_SIZE) hst[d] +=1; // questo if serve per evitare errori di accesso alla memoria nel caso di valori "strani"
// potrebbero lavorare piu' thread sullo stesso elemento
// va implementato un meccanismo di locking per evitarlo
// in piu' questo blocco va eseguito solo al termine del ciclo for, quando hai ormai analizzato tutti i punti
// altrimenti crei (n - TH) volte lo stesso cerchio
/*
if(hst[bin]>TH)
{
idx = atomicAdd(counter,1); // garantisce che l'incremento avvenga un thread alla volta, quindi idx puo' essere usato solo dal thread in esecuzione
SetElement(circles, idx,0,x);
SetElement(circles, idx,1,y);
// float r2=d;
// SetElement(circles, idx,2,r2);
SetElement(circles, idx,2,d);
}
*/
}
// dopo aver riempito l'array hst, scandisce tutti gli elementi e aggiunge alla lista dei cerchi il cerchio trovato
for ( i=0; i < HST_SIZE; i++)
{
if(hst[i]>TH)
{
idx = atomicAdd(counter,1); // garantisce che l'incremento avvenga un thread alla volta, quindi idx puo' essere usato solo dal thread in esecuzione
//log_msg(""); printf("counter: %d, x %f, y %f, radius: %f\n", idx, __int2float_rn(x), __int2float_rn(y), sqrt( __int2float_rn(i*BIN_SIZE) ) );
SetElement(circles, idx,0, __int2float_rn(x));
SetElement(circles, idx,1, __int2float_rn(y));
// float r2=d;
// SetElement(circles, idx,2,r2);
SetElement(circles, idx,2, sqrt( __int2float_rn(i*BIN_SIZE) ) );
}
}
}
//---------------------------------------------------------
//Histogram algorithm kernel
__global__ void GridKernel(DataFrame data, DataFrame circles,unsigned int *counter)
{
//x and y indexes
int x = blockIdx.x * blockDim.x+ threadIdx.x - (GRID_SIZE / 2);
int y = blockIdx.y * blockDim.y +threadIdx.y - (GRID_SIZE / 2);
// const size_t histo_size = GRID_SIZE*GRID_SIZE/BIN_SIZE;
// int histo[histo_size];
// se con histo non ci fai niente, e' inutile definirla qui e passarla alla funzione
// in piu' il contenuto non e' inizializzato
// HistoFill(x,y, data, histo, circles, counter);
HstFill(x,y, data, circles, counter);
}
//------------------------------------------------------------
//dump dataframe function
void dump(DataFrame m)
{
for (int i = 0; i< m.h; i++) // Loop sulle righe
{
for (int j = 0; j< m.w; j++) // Loop sulle colonne
printf("%3.1f\t", m.e[i*m.w + j]);
printf("\n"); // A capo di fine riga
}
printf("\n");
}
//------------------------------------------------------------
//read floats from file.dat
void readBin(std::vector<float> buf)
{
}
//-------------------------------------------------------------
int main(int argc, char** argv)
{
std::vector<float> buf;
std::ifstream in("file.dat",std::ios::binary); //open the input file
while(!in.eof())
{
float w;
in.read( (char*) &w, sizeof(w));
buf.push_back(w);
}
in.close(); //close the input file .... ormai i dati sono letti, puoi chiudere il file
DataFrame data, circles;
data.h = buf.size()>>1;
data.w = 2;
data.e = (float*) malloc(data.w * data.h * sizeof(float));
circles.h = 15;
circles.w = 3;
circles.e = (float*) malloc(circles.w * circles.h * sizeof(float));
data.e = &buf[0]; //copy imported floats from buf to data .... non stai copiando i dati, se fai cosi' non ti serve la memoria che hai allocato prima
float a = CircleFit(data, circles);
// dump(data);
dump(circles);
// in.close(); //close the input file
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <math.h>
#include <iostream>
#include <inttypes.h>
#include <vector>
#include <fstream> // I/O
//struct DataFrame
struct DataFrame
{
int w;
int h;
float* e;
};
//Thread block size
#define BLOCK_SIZE 16 //number of threads for each block
#define GRID_SIZE 128 //dimension of entire space
//warning GRID_SIZE must be multiple of 16
#define BIN_SIZE 4 //size of a hisogram bin
#define TH 30 //threshold for triggering the histogram
__global__ void GridKernel(const DataFrame, DataFrame, unsigned int*); //aggiungere i parametri
//------------------------------------------------------------
//Histogram algorithm HOST CODE
float CircleFit(const DataFrame data, DataFrame circles)
{
//load data to device memory
DataFrame d_data;
d_data.w = data.w;
d_data.h = data.h;
size_t size = data.w * data.h * sizeof(float);
hipError_t err = hipMalloc(&d_data.e, size);
if(err)
printf("CUDA malloc data DataFrame: %s\n",hipGetErrorString(err));
hipMemcpy(d_data.e, data.e, size, hipMemcpyHostToDevice);
//allocate circles
DataFrame d_circles;
d_circles.w = circles.w;
d_circles.h = circles.h;
size = circles.w * circles.h * sizeof(float);
err = hipMalloc(&d_circles.e, size);
if(err)
printf("CUDA malloc circles DataFrame: %s\n",hipGetErrorString(err));
//allocate variable for counting circles found
unsigned int *d_counter;
unsigned int h_counter;
hipMallocManaged(&d_counter, sizeof(unsigned int));
if(err)
printf("CUDA malloc counter variable: %s\n",hipGetErrorString(err));
// printf("initialized counter : %u\n", *counter);
// Hai assegnato al puntatore l'indirizzo di memoria da utilizzare, ma e' memoria della GPU, non puoi accedere da qui
// ma il valore nella cella puntata non e' inizializzato
hipMemset(d_counter, 0, sizeof(unsigned int));
hipMemcpy(&h_counter, d_counter, sizeof(unsigned int), hipMemcpyDeviceToHost);
printf("initialized counter : %u\n", h_counter);
//
float time;
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
//start time
hipEventRecord(start);
// Define the geometry
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE/ dimBlock.x, GRID_SIZE/ dimBlock.y);
//warning GRID_SIZE must be multiple of 16
// Invoke kernel
GridKernel<<<dimGrid, dimBlock>>>(d_data, d_circles, d_counter);
err = hipDeviceSynchronize();
//stop time
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
if(err)
printf("Run kernel: %s\n", hipGetErrorString(err));
printf("Time: %3.5f ms\n",time);
// Read C from device memory
size = circles.w * circles.h * sizeof(float);
err = hipMemcpy(circles.e, d_circles.e, size, hipMemcpyDeviceToHost);
if(err)
printf("Copy circles off of device: %s\n",hipGetErrorString(err));
hipMemcpy(&h_counter, d_counter, sizeof(unsigned int), hipMemcpyDeviceToHost);
printf("counter : %u\n", h_counter);
// Free device memory
hipFree(d_data.e);
hipFree(d_circles.e);
hipFree(d_counter);
return time;
} //END HOST FUNCTION
//---------------------------------------------------------
// thread aware log function
__device__ void log_msg(const char * message)
{
printf("%d.%d.%d.%d-%s", blockIdx.x, blockIdx.y, threadIdx.x, threadIdx.y, message);
}
//Device function to calculate distance
__device__ float Distance2(float x, float y, float xd, float yd)
{
return (x-xd)*(x-xd)+(y-yd)*(y-yd);
}
//---------------------------------------------------------
//Device function to Get Element from data
__device__ float GetElement(const DataFrame D, int row, int col)
{
if ( (row < D.h ) && (col < D.w ) )
return D.e[row * D.w + col];
else
return 0.0;
}
//---------------------------------------------------------------
//Device function to Set Element of data
__device__ void SetElement(DataFrame D, int row, int col, float value)
{
if ( (row < D.h ) && (col < D.w ) )
D.e[row * D.w + col] = value;
}
//---------------------------------------------------------
//Device function to fill histo
// __device__ void HistoFill(int x, int y, DataFrame data,int *h, DataFrame circles, unsigned int *counter)
__device__ void HstFill(int x, int y, DataFrame data,DataFrame circles, unsigned int *counter)
{
const size_t HST_SIZE = GRID_SIZE*GRID_SIZE/BIN_SIZE;
int hst[HST_SIZE] = {0}; // va inizializzato
int i, idx;
// log_id(); printf("HstFill\n");
// atomicAdd(counter,1);
for ( i=0; i<data.h; i++)
{
// float xd, yd, d;
float xd, yd;
int d;
xd = GetElement(data, i, 0);
yd = GetElement(data, i, 1);
d = __float2int_rn( Distance2( __int2float_rn(x), __int2float_rn(y),xd,yd) ) / BIN_SIZE ;
if (d < HST_SIZE) hst[d] +=1; // questo if serve per evitare errori di accesso alla memoria nel caso di valori "strani"
// potrebbero lavorare piu' thread sullo stesso elemento
// va implementato un meccanismo di locking per evitarlo
// in piu' questo blocco va eseguito solo al termine del ciclo for, quando hai ormai analizzato tutti i punti
// altrimenti crei (n - TH) volte lo stesso cerchio
/*
if(hst[bin]>TH)
{
idx = atomicAdd(counter,1); // garantisce che l'incremento avvenga un thread alla volta, quindi idx puo' essere usato solo dal thread in esecuzione
SetElement(circles, idx,0,x);
SetElement(circles, idx,1,y);
// float r2=d;
// SetElement(circles, idx,2,r2);
SetElement(circles, idx,2,d);
}
*/
}
// dopo aver riempito l'array hst, scandisce tutti gli elementi e aggiunge alla lista dei cerchi il cerchio trovato
for ( i=0; i < HST_SIZE; i++)
{
if(hst[i]>TH)
{
idx = atomicAdd(counter,1); // garantisce che l'incremento avvenga un thread alla volta, quindi idx puo' essere usato solo dal thread in esecuzione
//log_msg(""); printf("counter: %d, x %f, y %f, radius: %f\n", idx, __int2float_rn(x), __int2float_rn(y), sqrt( __int2float_rn(i*BIN_SIZE) ) );
SetElement(circles, idx,0, __int2float_rn(x));
SetElement(circles, idx,1, __int2float_rn(y));
// float r2=d;
// SetElement(circles, idx,2,r2);
SetElement(circles, idx,2, sqrt( __int2float_rn(i*BIN_SIZE) ) );
}
}
}
//---------------------------------------------------------
//Histogram algorithm kernel
__global__ void GridKernel(DataFrame data, DataFrame circles,unsigned int *counter)
{
    // Map this thread onto one lattice point of the search space, shifted so
    // the candidate circle centre ranges over [-GRID_SIZE/2, GRID_SIZE/2).
    const int half = GRID_SIZE / 2;
    const int cx = blockIdx.x * blockDim.x + threadIdx.x - half;
    const int cy = blockIdx.y * blockDim.y + threadIdx.y - half;
    // Each thread histograms distances to every data point and emits its
    // candidate circles; `counter` is incremented atomically inside HstFill.
    HstFill(cx, cy, data, circles, counter);
}
//------------------------------------------------------------
//dump dataframe function
void dump(DataFrame m)
{
    // Print the frame row by row: tab-separated "%3.1f" values, one line per
    // row, followed by a trailing blank line.
    for (int row = 0; row < m.h; ++row)
    {
        const float *line = m.e + row * m.w;
        for (int col = 0; col < m.w; ++col)
            printf("%3.1f\t", line[col]);
        printf("\n");
    }
    printf("\n");
}
//------------------------------------------------------------
//read floats from file.dat
// Unimplemented stub: was meant to read floats from file.dat into `buf`.
// NOTE(review): `buf` is taken by value, so even once implemented the
// caller's vector would never be filled — should take std::vector<float>&.
// main() currently does the reading inline and never calls this.
void readBin(std::vector<float> buf)
{
}
//-------------------------------------------------------------
int main(int argc, char** argv)
{
    // Read the whole binary file of float32 values into buf.
    std::vector<float> buf;
    std::ifstream in("file.dat", std::ios::binary); // open the input file
    if (!in) {
        printf("cannot open file.dat\n");
        return 1;
    }
    float w;
    // Test the stream state AFTER each read: the previous `while(!in.eof())`
    // loop pushed one stale value after the final successful read.
    while (in.read((char*) &w, sizeof(w)))
        buf.push_back(w);
    in.close(); // data is in memory now; the file can be closed

    DataFrame data, circles;
    data.h = buf.size() >> 1; // two floats (x, y) per data point
    data.w = 2;
    // Alias the vector's storage directly; buf outlives every use of `data`.
    // (The old code malloc'd a buffer and immediately leaked it by pointing
    // data.e at &buf[0] anyway.)
    data.e = buf.empty() ? NULL : &buf[0];

    circles.h = 15; // capacity: at most 15 circles reported
    circles.w = 3;  // x, y, radius
    circles.e = (float*) malloc(circles.w * circles.h * sizeof(float));

    CircleFit(data, circles); // returns elapsed ms (also printed inside)
    // dump(data);
    dump(circles);

    free(circles.e); // release the host-side result buffer
    return 0;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <math.h>
#include <iostream>
#include <inttypes.h>
#include <vector>
#include <fstream> // I/O
//struct DataFrame
// Minimal matrix view shared by host and device code: `h` rows by `w`
// columns of float stored row-major in `e` — element (i, j) lives at
// e[i*w + j], as used by GetElement/SetElement/dump.
// NOTE(review): `e` is managed by callers (malloc'd in main, hipMalloc'd in
// CircleFit); the struct itself never allocates or frees it.
struct DataFrame
{
    int w;    // number of columns
    int h;    // number of rows
    float* e; // row-major element storage (h*w floats)
};
//Thread block size
#define BLOCK_SIZE 16 //number of threads for each block
#define GRID_SIZE 128 //dimension of entire space
//warning GRID_SIZE must be multiple of 16
#define BIN_SIZE 4 //size of a hisogram bin
#define TH 30 //threshold for triggering the histogram
__global__ void GridKernel(const DataFrame, DataFrame, unsigned int*); //aggiungere i parametri
//------------------------------------------------------------
//Histogram algorithm HOST CODE
// Host driver for the histogram circle search.
// Copies `data` (input points) to the device, launches GridKernel over a
// GRID_SIZE x GRID_SIZE thread lattice, times the run with events, and
// copies the found circles back into `circles`. Returns the elapsed time
// in milliseconds.
float CircleFit(const DataFrame data, DataFrame circles)
{
    // --- copy the input points to device memory --------------------------
    DataFrame d_data;
    d_data.w = data.w;
    d_data.h = data.h;
    size_t size = data.w * data.h * sizeof(float);
    hipError_t err = hipMalloc(&d_data.e, size);
    if (err)
        printf("CUDA malloc data DataFrame: %s\n",hipGetErrorString(err));
    hipMemcpy(d_data.e, data.e, size, hipMemcpyHostToDevice);

    // --- allocate the device-side output frame ---------------------------
    DataFrame d_circles;
    d_circles.w = circles.w;
    d_circles.h = circles.h;
    size = circles.w * circles.h * sizeof(float);
    err = hipMalloc(&d_circles.e, size);
    if (err)
        printf("CUDA malloc circles DataFrame: %s\n",hipGetErrorString(err));

    // --- counter of circles found (managed allocation, zero-initialized) --
    unsigned int *d_counter;
    unsigned int h_counter;
    // BUG FIX: the return value of hipMallocManaged was previously ignored,
    // so the check below re-reported the stale error from the circles
    // hipMalloc and missed real managed-allocation failures.
    err = hipMallocManaged(&d_counter, sizeof(unsigned int));
    if (err)
        printf("CUDA malloc counter variable: %s\n",hipGetErrorString(err));
    hipMemset(d_counter, 0, sizeof(unsigned int));
    hipMemcpy(&h_counter, d_counter, sizeof(unsigned int), hipMemcpyDeviceToHost);
    printf("initialized counter : %u\n", h_counter);

    // --- time the kernel with an event pair ------------------------------
    float time;
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start);

    // One thread per lattice point; GRID_SIZE must be a multiple of
    // BLOCK_SIZE so the grid covers the space exactly.
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(GRID_SIZE / dimBlock.x, GRID_SIZE / dimBlock.y);
    GridKernel<<<dimGrid, dimBlock>>>(d_data, d_circles, d_counter);
    err = hipDeviceSynchronize(); // surfaces asynchronous kernel errors

    hipEventRecord(stop);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&time, start, stop);
    if (err)
        printf("Run kernel: %s\n", hipGetErrorString(err));
    printf("Time: %3.5f ms\n",time);

    // --- copy results back ------------------------------------------------
    size = circles.w * circles.h * sizeof(float);
    err = hipMemcpy(circles.e, d_circles.e, size, hipMemcpyDeviceToHost);
    if (err)
        printf("Copy circles off of device: %s\n",hipGetErrorString(err));
    hipMemcpy(&h_counter, d_counter, sizeof(unsigned int), hipMemcpyDeviceToHost);
    printf("counter : %u\n", h_counter);

    // --- release every device resource (events were previously leaked) ---
    hipEventDestroy(start);
    hipEventDestroy(stop);
    hipFree(d_data.e);
    hipFree(d_circles.e);
    hipFree(d_counter);
    return time;
} //END HOST FUNCTION
//---------------------------------------------------------
// thread aware log function
// Thread-aware log helper: prefixes `message` with the caller's coordinates
// formatted as "blockIdx.x.blockIdx.y.threadIdx.x.threadIdx.y-".
// Device-side printf serializes output and is slow — debugging use only.
__device__ void log_msg(const char * message)
{
    printf("%d.%d.%d.%d-%s", blockIdx.x, blockIdx.y, threadIdx.x, threadIdx.y, message);
}
//Device function to calculate distance
// Squared Euclidean distance between (x, y) and (xd, yd).
// No sqrt here: callers bin on the squared distance and take the root
// only when a circle is actually emitted.
__device__ float Distance2(float x, float y, float xd, float yd)
{
    const float dx = x - xd;
    const float dy = y - yd;
    return dx * dx + dy * dy;
}
//---------------------------------------------------------
//Device function to Get Element from data
// Bounds-checked read of element (row, col) from frame D.
// Returns 0.0f for any index outside [0, D.h) x [0, D.w) so callers never
// fault on stray indices.
__device__ float GetElement(const DataFrame D, int row, int col)
{
    // Guard both ends of the range: the original only checked the upper
    // bound, so a negative row/col would have read out of bounds.
    if (row >= 0 && row < D.h && col >= 0 && col < D.w)
        return D.e[row * D.w + col];
    return 0.0f; // float literal avoids an implicit double->float conversion
}
//---------------------------------------------------------------
//Device function to Set Element of data
// Bounds-checked write of `value` to element (row, col) of frame D.
// Writes with any index outside [0, D.h) x [0, D.w) are silently dropped.
__device__ void SetElement(DataFrame D, int row, int col, float value)
{
    // Guard both ends of the range: the original only checked the upper
    // bound, so a negative row/col would have written out of bounds.
    if (row >= 0 && row < D.h && col >= 0 && col < D.w)
        D.e[row * D.w + col] = value;
}
//---------------------------------------------------------
//Device function to fill histo
// __device__ void HistoFill(int x, int y, DataFrame data,int *h, DataFrame circles, unsigned int *counter)
// Per-thread accumulator for the candidate circle centre (x, y): histogram
// the (rounded) squared distance from (x, y) to every data point into bins
// of width BIN_SIZE, then emit one circle per bin whose count exceeds TH —
// centre (x, y), radius sqrt(bin * BIN_SIZE).
// NOTE(review): hst is GRID_SIZE*GRID_SIZE/BIN_SIZE = 4096 ints (16 KB) of
// per-thread local storage — presumably spilled to device memory; confirm
// the performance impact before scaling the grid up.
__device__ void HstFill(int x, int y, DataFrame data,DataFrame circles, unsigned int *counter)
{
    const size_t HST_SIZE = GRID_SIZE*GRID_SIZE/BIN_SIZE;
    int hst[HST_SIZE] = {0}; // must be zero-initialized
    int i, idx;
    // log_id(); printf("HstFill\n");
    // atomicAdd(counter,1);
    for ( i=0; i<data.h; i++)
    {
        // float xd, yd, d;
        float xd, yd;
        int d;
        xd = GetElement(data, i, 0);
        yd = GetElement(data, i, 1);
        // bin index = round(squared distance) / BIN_SIZE
        d = __float2int_rn( Distance2( __int2float_rn(x), __int2float_rn(y),xd,yd) ) / BIN_SIZE ;
        if (d < HST_SIZE) hst[d] +=1; // guards against out-of-range bins for "odd" distance values
        // Emitting circles here was wrong for two reasons: several threads
        // could race on the same output element (some locking would be
        // needed), and running before the loop finishes would create the
        // same circle (n - TH) times — hence the emission loop below.
        /*
        if(hst[bin]>TH)
        {
            idx = atomicAdd(counter,1); // increments one thread at a time, so idx is private to this thread
            SetElement(circles, idx,0,x);
            SetElement(circles, idx,1,y);
            // float r2=d;
            // SetElement(circles, idx,2,r2);
            SetElement(circles, idx,2,d);
        }
        */
    }
    // After filling hst, scan every bin and append each circle found to the list.
    for ( i=0; i < HST_SIZE; i++)
    {
        if(hst[i]>TH)
        {
            idx = atomicAdd(counter,1); // increments one thread at a time, so idx is private to this thread
            //log_msg(""); printf("counter: %d, x %f, y %f, radius: %f\n", idx, __int2float_rn(x), __int2float_rn(y), sqrt( __int2float_rn(i*BIN_SIZE) ) );
            SetElement(circles, idx,0, __int2float_rn(x));
            SetElement(circles, idx,1, __int2float_rn(y));
            // float r2=d;
            // SetElement(circles, idx,2,r2);
            SetElement(circles, idx,2, sqrt( __int2float_rn(i*BIN_SIZE) ) );
        }
    }
}
//---------------------------------------------------------
//Histogram algorithm kernel
// One thread per lattice point of the GRID_SIZE x GRID_SIZE search space.
// Expects a 2D launch whose grid*block product equals GRID_SIZE in each
// dimension (see the launch in CircleFit), so no tail bounds check is done.
__global__ void GridKernel(DataFrame data, DataFrame circles,unsigned int *counter)
{
    // x and y indexes, shifted so the candidate centre ranges over
    // [-GRID_SIZE/2, GRID_SIZE/2)
    int x = blockIdx.x * blockDim.x+ threadIdx.x - (GRID_SIZE / 2);
    int y = blockIdx.y * blockDim.y +threadIdx.y - (GRID_SIZE / 2);
    // const size_t histo_size = GRID_SIZE*GRID_SIZE/BIN_SIZE;
    // int histo[histo_size];
    // If histo is never used, there is no point defining it here and passing
    // it down; besides, its contents were uninitialized.
    // HistoFill(x,y, data, histo, circles, counter);
    HstFill(x,y, data, circles, counter);
}
//------------------------------------------------------------
//dump dataframe function
// Print DataFrame m to stdout: one line per row of tab-separated "%3.1f"
// values, followed by a trailing blank line.
void dump(DataFrame m)
{
    for (int i = 0; i< m.h; i++) // loop over rows
    {
        for (int j = 0; j< m.w; j++) // loop over columns
            printf("%3.1f\t", m.e[i*m.w + j]);
        printf("\n"); // end-of-row newline
    }
    printf("\n");
}
//------------------------------------------------------------
//read floats from file.dat
// Unimplemented stub: was meant to read floats from file.dat into `buf`.
// NOTE(review): `buf` is taken by value, so even once implemented the
// caller's vector would never be filled; main() reads the file inline.
void readBin(std::vector<float> buf)
{
}
//-------------------------------------------------------------
int main(int argc, char** argv)
{
    // Read the whole binary file of float32 values into buf.
    std::vector<float> buf;
    std::ifstream in("file.dat",std::ios::binary); //open the input file
    if (!in) {
        printf("cannot open file.dat\n");
        return 1;
    }
    float w;
    // Test the stream state AFTER each read: the previous `while(!in.eof())`
    // loop pushed one stale value after the final successful read.
    while (in.read((char*) &w, sizeof(w)))
        buf.push_back(w);
    in.close(); // data is in memory now; the file can be closed

    DataFrame data, circles;
    data.h = buf.size() >> 1; // two floats (x, y) per data point
    data.w = 2;
    // Alias the vector's storage directly; buf outlives every use of `data`.
    // (The old code malloc'd a buffer and immediately leaked it by pointing
    // data.e at &buf[0] anyway.)
    data.e = buf.empty() ? NULL : &buf[0];

    circles.h = 15; // capacity: at most 15 circles reported
    circles.w = 3;  // x, y, radius
    circles.e = (float*) malloc(circles.w * circles.h * sizeof(float));

    CircleFit(data, circles); // returns elapsed ms (also printed inside)
    // dump(data);
    dump(circles);

    free(circles.e); // release the host-side result buffer
    return 0;
}
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z10GridKernel9DataFrameS_Pj
.globl _Z10GridKernel9DataFrameS_Pj
.p2align 8
.type _Z10GridKernel9DataFrameS_Pj,@function
_Z10GridKernel9DataFrameS_Pj:
s_clause 0x1
s_load_b32 s12, s[0:1], 0x34
s_load_b256 s[4:11], s[0:1], 0x0
v_dual_mov_b32 v1, 16 :: v_dual_mov_b32 v2, 0
s_mov_b64 s[2:3], 0
s_waitcnt lgkmcnt(0)
s_lshr_b32 s13, s12, 16
.LBB0_1:
s_add_u32 s2, s2, 1
s_addc_u32 s3, s3, 0
scratch_store_b8 v1, v2, off
v_cmp_gt_u64_e64 s16, 0x4000, s[2:3]
v_add_nc_u32_e32 v1, 1, v1
s_delay_alu instid0(VALU_DEP_2)
s_and_b32 vcc_lo, exec_lo, s16
s_cbranch_vccnz .LBB0_1
v_and_b32_e32 v1, 0x3ff, v0
v_bfe_u32 v0, v0, 10, 10
s_and_b32 s2, 0xffff, s12
s_and_b32 s3, 0xffff, s13
s_mul_i32 s14, s14, s2
s_mul_i32 s15, s15, s3
v_add3_u32 v1, v1, s14, 0xffffffc0
v_add3_u32 v0, v0, s15, 0xffffffc0
s_cmp_lt_i32 s5, 1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cvt_f32_i32_e32 v2, v1
v_cvt_f32_i32_e32 v3, v0
s_cbranch_scc1 .LBB0_11
s_add_u32 s12, s6, 4
s_addc_u32 s13, s7, 0
s_cmp_gt_i32 s4, 0
s_mov_b32 s3, 0
s_cselect_b32 s14, -1, 0
s_cmp_gt_i32 s4, 1
s_mov_b32 s2, s3
s_cselect_b32 s15, -1, 0
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_5
.p2align 6
.LBB0_4:
s_or_b32 exec_lo, exec_lo, s16
s_add_i32 s5, s5, -1
s_add_i32 s2, s2, s4
s_cmp_eq_u32 s5, 0
s_cbranch_scc1 .LBB0_11
.LBB0_5:
s_and_not1_b32 vcc_lo, exec_lo, s14
s_mov_b32 s16, 0
s_cbranch_vccnz .LBB0_7
s_lshl_b64 s[16:17], s[2:3], 2
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s16, s6, s16
s_addc_u32 s17, s7, s17
s_load_b32 s16, s[16:17], 0x0
.LBB0_7:
s_and_not1_b32 vcc_lo, exec_lo, s15
s_mov_b32 s17, 0
s_cbranch_vccnz .LBB0_9
s_lshl_b64 s[18:19], s[2:3], 2
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s18, s12, s18
s_addc_u32 s19, s13, s19
s_load_b32 s17, s[18:19], 0x0
.LBB0_9:
s_waitcnt lgkmcnt(0)
v_dual_subrev_f32 v0, s17, v3 :: v_dual_subrev_f32 v1, s16, v2
s_mov_b32 s16, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f32_e32 v0, v0, v0
v_fmac_f32_e32 v0, v1, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_rndne_f32_e32 v0, v0
v_cvt_i32_f32_e32 v0, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v1, 31, v0
v_lshrrev_b32_e32 v1, 30, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v0, v0, v1
v_ashrrev_i32_e32 v0, 2, v0
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_u32_e32 0x1000, v0
s_cbranch_execz .LBB0_4
v_lshl_add_u32 v0, v0, 2, 16
scratch_load_b32 v1, v0, off
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, 1, v1
scratch_store_b32 v0, v1, off
s_branch .LBB0_4
.LBB0_11:
s_set_inst_prefetch_distance 0x2
s_load_b64 s[2:3], s[0:1], 0x20
s_add_u32 s1, s10, 4
s_addc_u32 s4, s11, 0
s_add_u32 s5, s10, 8
s_addc_u32 s6, s11, 0
s_cmp_gt_i32 s8, 0
v_mov_b32_e32 v4, 0
s_cselect_b32 s7, -1, 0
s_cmp_gt_i32 s8, 1
s_mov_b32 s13, 0
s_cselect_b32 s12, -1, 0
s_cmp_gt_i32 s8, 2
s_cselect_b32 s14, -1, 0
s_branch .LBB0_13
.LBB0_12:
s_or_b32 exec_lo, exec_lo, s15
s_add_i32 s13, s13, 4
s_delay_alu instid0(SALU_CYCLE_1)
s_cmpk_lg_i32 s13, 0x4000
s_cbranch_scc0 .LBB0_22
.LBB0_13:
s_add_i32 s0, s13, 16
s_mov_b32 s15, exec_lo
scratch_load_b32 v0, off, s0
s_waitcnt vmcnt(0)
v_cmpx_lt_i32_e32 30, v0
s_cbranch_execz .LBB0_12
s_mov_b32 s16, exec_lo
s_mov_b32 s0, exec_lo
v_mbcnt_lo_u32_b32 v0, s16, 0
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB0_16
s_bcnt1_i32_b32 s16, s16
s_delay_alu instid0(SALU_CYCLE_1)
v_mov_b32_e32 v1, s16
s_waitcnt lgkmcnt(0)
global_atomic_add_u32 v1, v4, v1, s[2:3] glc
.LBB0_16:
s_or_b32 exec_lo, exec_lo, s0
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s0, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v1, s0, v0
v_mul_lo_u32 v0, v1, s8
v_cmp_gt_i32_e32 vcc_lo, s9, v1
s_and_b32 s0, vcc_lo, s7
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s16, s0
s_cbranch_execz .LBB0_18
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v1, 31, v0
v_lshlrev_b64 v[5:6], 2, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v5, s0, s10, v5
v_add_co_ci_u32_e64 v6, s0, s11, v6, s0
global_store_b32 v[5:6], v2, off
.LBB0_18:
s_or_b32 exec_lo, exec_lo, s16
s_and_b32 s0, vcc_lo, s12
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s16, s0
s_cbranch_execz .LBB0_20
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[5:6], 2, v[0:1]
v_add_co_u32 v5, s0, s1, v5
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v6, s0, s4, v6, s0
global_store_b32 v[5:6], v3, off
.LBB0_20:
s_or_b32 exec_lo, exec_lo, s16
s_and_b32 s0, vcc_lo, s14
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 exec_lo, exec_lo, s0
s_cbranch_execz .LBB0_12
v_cvt_f32_i32_e32 v1, s13
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cmp_gt_f32_e32 vcc_lo, 0xf800000, v1
v_mul_f32_e32 v5, 0x4f800000, v1
v_cndmask_b32_e32 v5, v1, v5, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_2)
v_sqrt_f32_e32 v1, v5
s_waitcnt_depctr 0xfff
v_add_nc_u32_e32 v6, -1, v1
v_add_nc_u32_e32 v7, 1, v1
v_fma_f32 v8, -v6, v1, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_fma_f32 v9, -v7, v1, v5
v_cmp_ge_f32_e64 s0, 0, v8
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
v_cndmask_b32_e64 v1, v1, v6, s0
v_cmp_lt_f32_e64 s0, 0, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_cndmask_b32_e64 v6, v1, v7, s0
v_ashrrev_i32_e32 v1, 31, v0
v_mul_f32_e32 v7, 0x37800000, v6
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[0:1], 2, v[0:1]
v_cndmask_b32_e32 v6, v6, v7, vcc_lo
v_cmp_class_f32_e64 vcc_lo, v5, 0x260
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
v_cndmask_b32_e32 v5, v6, v5, vcc_lo
v_add_co_u32 v0, vcc_lo, s5, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s6, v1, vcc_lo
global_store_b32 v[0:1], v5, off
s_branch .LBB0_12
.LBB0_22:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10GridKernel9DataFrameS_Pj
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 16400
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 1
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 10
.amdhsa_next_free_sgpr 20
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z10GridKernel9DataFrameS_Pj, .Lfunc_end0-_Z10GridKernel9DataFrameS_Pj
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 16
.value_kind: by_value
- .offset: 16
.size: 16
.value_kind: by_value
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10GridKernel9DataFrameS_Pj
.private_segment_fixed_size: 16400
.sgpr_count: 22
.sgpr_spill_count: 0
.symbol: _Z10GridKernel9DataFrameS_Pj.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 10
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <math.h>
#include <iostream>
#include <inttypes.h>
#include <vector>
#include <fstream> // I/O
//struct DataFrame
struct DataFrame
{
int w;
int h;
float* e;
};
//Thread block size
#define BLOCK_SIZE 16 //number of threads for each block
#define GRID_SIZE 128 //dimension of entire space
//warning GRID_SIZE must be multiple of 16
#define BIN_SIZE 4 //size of a hisogram bin
#define TH 30 //threshold for triggering the histogram
__global__ void GridKernel(const DataFrame, DataFrame, unsigned int*); //aggiungere i parametri
//------------------------------------------------------------
//Histogram algorithm HOST CODE
float CircleFit(const DataFrame data, DataFrame circles)
{
//load data to device memory
DataFrame d_data;
d_data.w = data.w;
d_data.h = data.h;
size_t size = data.w * data.h * sizeof(float);
hipError_t err = hipMalloc(&d_data.e, size);
if(err)
printf("CUDA malloc data DataFrame: %s\n",hipGetErrorString(err));
hipMemcpy(d_data.e, data.e, size, hipMemcpyHostToDevice);
//allocate circles
DataFrame d_circles;
d_circles.w = circles.w;
d_circles.h = circles.h;
size = circles.w * circles.h * sizeof(float);
err = hipMalloc(&d_circles.e, size);
if(err)
printf("CUDA malloc circles DataFrame: %s\n",hipGetErrorString(err));
//allocate variable for counting circles found
unsigned int *d_counter;
unsigned int h_counter;
hipMallocManaged(&d_counter, sizeof(unsigned int));
if(err)
printf("CUDA malloc counter variable: %s\n",hipGetErrorString(err));
// printf("initialized counter : %u\n", *counter);
// Hai assegnato al puntatore l'indirizzo di memoria da utilizzare, ma e' memoria della GPU, non puoi accedere da qui
// ma il valore nella cella puntata non e' inizializzato
hipMemset(d_counter, 0, sizeof(unsigned int));
hipMemcpy(&h_counter, d_counter, sizeof(unsigned int), hipMemcpyDeviceToHost);
printf("initialized counter : %u\n", h_counter);
//
float time;
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
//start time
hipEventRecord(start);
// Define the geometry
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE/ dimBlock.x, GRID_SIZE/ dimBlock.y);
//warning GRID_SIZE must be multiple of 16
// Invoke kernel
GridKernel<<<dimGrid, dimBlock>>>(d_data, d_circles, d_counter);
err = hipDeviceSynchronize();
//stop time
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
if(err)
printf("Run kernel: %s\n", hipGetErrorString(err));
printf("Time: %3.5f ms\n",time);
// Read C from device memory
size = circles.w * circles.h * sizeof(float);
err = hipMemcpy(circles.e, d_circles.e, size, hipMemcpyDeviceToHost);
if(err)
printf("Copy circles off of device: %s\n",hipGetErrorString(err));
hipMemcpy(&h_counter, d_counter, sizeof(unsigned int), hipMemcpyDeviceToHost);
printf("counter : %u\n", h_counter);
// Free device memory
hipFree(d_data.e);
hipFree(d_circles.e);
hipFree(d_counter);
return time;
} //END HOST FUNCTION
//---------------------------------------------------------
// thread aware log function
__device__ void log_msg(const char * message)
{
printf("%d.%d.%d.%d-%s", blockIdx.x, blockIdx.y, threadIdx.x, threadIdx.y, message);
}
//Device function to calculate distance
__device__ float Distance2(float x, float y, float xd, float yd)
{
return (x-xd)*(x-xd)+(y-yd)*(y-yd);
}
//---------------------------------------------------------
//Device function to Get Element from data
__device__ float GetElement(const DataFrame D, int row, int col)
{
if ( (row < D.h ) && (col < D.w ) )
return D.e[row * D.w + col];
else
return 0.0;
}
//---------------------------------------------------------------
//Device function to Set Element of data
__device__ void SetElement(DataFrame D, int row, int col, float value)
{
if ( (row < D.h ) && (col < D.w ) )
D.e[row * D.w + col] = value;
}
//---------------------------------------------------------
//Device function to fill histo
// __device__ void HistoFill(int x, int y, DataFrame data,int *h, DataFrame circles, unsigned int *counter)
__device__ void HstFill(int x, int y, DataFrame data,DataFrame circles, unsigned int *counter)
{
const size_t HST_SIZE = GRID_SIZE*GRID_SIZE/BIN_SIZE;
int hst[HST_SIZE] = {0}; // va inizializzato
int i, idx;
// log_id(); printf("HstFill\n");
// atomicAdd(counter,1);
for ( i=0; i<data.h; i++)
{
// float xd, yd, d;
float xd, yd;
int d;
xd = GetElement(data, i, 0);
yd = GetElement(data, i, 1);
d = __float2int_rn( Distance2( __int2float_rn(x), __int2float_rn(y),xd,yd) ) / BIN_SIZE ;
if (d < HST_SIZE) hst[d] +=1; // questo if serve per evitare errori di accesso alla memoria nel caso di valori "strani"
// potrebbero lavorare piu' thread sullo stesso elemento
// va implementato un meccanismo di locking per evitarlo
// in piu' questo blocco va eseguito solo al termine del ciclo for, quando hai ormai analizzato tutti i punti
// altrimenti crei (n - TH) volte lo stesso cerchio
/*
if(hst[bin]>TH)
{
idx = atomicAdd(counter,1); // garantisce che l'incremento avvenga un thread alla volta, quindi idx puo' essere usato solo dal thread in esecuzione
SetElement(circles, idx,0,x);
SetElement(circles, idx,1,y);
// float r2=d;
// SetElement(circles, idx,2,r2);
SetElement(circles, idx,2,d);
}
*/
}
// dopo aver riempito l'array hst, scandisce tutti gli elementi e aggiunge alla lista dei cerchi il cerchio trovato
for ( i=0; i < HST_SIZE; i++)
{
if(hst[i]>TH)
{
idx = atomicAdd(counter,1); // garantisce che l'incremento avvenga un thread alla volta, quindi idx puo' essere usato solo dal thread in esecuzione
//log_msg(""); printf("counter: %d, x %f, y %f, radius: %f\n", idx, __int2float_rn(x), __int2float_rn(y), sqrt( __int2float_rn(i*BIN_SIZE) ) );
SetElement(circles, idx,0, __int2float_rn(x));
SetElement(circles, idx,1, __int2float_rn(y));
// float r2=d;
// SetElement(circles, idx,2,r2);
SetElement(circles, idx,2, sqrt( __int2float_rn(i*BIN_SIZE) ) );
}
}
}
//---------------------------------------------------------
//Histogram algorithm kernel
__global__ void GridKernel(DataFrame data, DataFrame circles,unsigned int *counter)
{
//x and y indexes
int x = blockIdx.x * blockDim.x+ threadIdx.x - (GRID_SIZE / 2);
int y = blockIdx.y * blockDim.y +threadIdx.y - (GRID_SIZE / 2);
// const size_t histo_size = GRID_SIZE*GRID_SIZE/BIN_SIZE;
// int histo[histo_size];
// se con histo non ci fai niente, e' inutile definirla qui e passarla alla funzione
// in piu' il contenuto non e' inizializzato
// HistoFill(x,y, data, histo, circles, counter);
HstFill(x,y, data, circles, counter);
}
//------------------------------------------------------------
//dump dataframe function
void dump(DataFrame m)
{
for (int i = 0; i< m.h; i++) // Loop sulle righe
{
for (int j = 0; j< m.w; j++) // Loop sulle colonne
printf("%3.1f\t", m.e[i*m.w + j]);
printf("\n"); // A capo di fine riga
}
printf("\n");
}
//------------------------------------------------------------
//read floats from file.dat
void readBin(std::vector<float> buf)
{
}
//-------------------------------------------------------------
int main(int argc, char** argv)
{
std::vector<float> buf;
std::ifstream in("file.dat",std::ios::binary); //open the input file
while(!in.eof())
{
float w;
in.read( (char*) &w, sizeof(w));
buf.push_back(w);
}
in.close(); //close the input file .... ormai i dati sono letti, puoi chiudere il file
DataFrame data, circles;
data.h = buf.size()>>1;
data.w = 2;
data.e = (float*) malloc(data.w * data.h * sizeof(float));
circles.h = 15;
circles.w = 3;
circles.e = (float*) malloc(circles.w * circles.h * sizeof(float));
data.e = &buf[0]; //copy imported floats from buf to data .... non stai copiando i dati, se fai cosi' non ti serve la memoria che hai allocato prima
float a = CircleFit(data, circles);
// dump(data);
dump(circles);
// in.close(); //close the input file
return 0;
} | .text
.file "reticolo.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z9CircleFit9DataFrameS_ # -- Begin function _Z9CircleFit9DataFrameS_
.p2align 4, 0x90
.type _Z9CircleFit9DataFrameS_,@function
_Z9CircleFit9DataFrameS_: # @_Z9CircleFit9DataFrameS_
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $184, %rsp
.cfi_def_cfa_offset 240
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rcx, %rbx
movq %rdx, %r14
movq %rsi, %r15
movq %rdx, %r13
shrq $32, %r13
movq %rdi, 48(%rsp)
movq %rdi, %rax
shrq $32, %rax
imull %edi, %eax
movslq %eax, %r12
shlq $2, %r12
leaq 56(%rsp), %rdi
movq %r12, %rsi
callq hipMalloc
testl %eax, %eax
je .LBB0_2
# %bb.1:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
.LBB0_2:
movq 56(%rsp), %rdi
movq %r15, %rsi
movq %r12, %rdx
movl $1, %ecx
callq hipMemcpy
movl %r14d, 32(%rsp)
movl %r13d, 36(%rsp)
imull %r13d, %r14d
movslq %r14d, %r14
shlq $2, %r14
leaq 40(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
testl %eax, %eax
je .LBB0_4
# %bb.3:
movl %eax, %ebp
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.1, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
leaq 8(%rsp), %rdi
movl $4, %esi
movl $1, %edx
callq hipMallocManaged
movl %ebp, %edi
callq hipGetErrorString
movl $.L.str.2, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
jmp .LBB0_5
.LBB0_4: # %.critedge
leaq 8(%rsp), %rdi
movl $4, %esi
movl $1, %edx
callq hipMallocManaged
.LBB0_5:
movq 8(%rsp), %rdi
movl $4, %edx
xorl %esi, %esi
callq hipMemset
movq 8(%rsp), %rsi
leaq 4(%rsp), %rdi
movl $4, %edx
movl $2, %ecx
callq hipMemcpy
movl 4(%rsp), %esi
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
leaq 24(%rsp), %rdi
callq hipEventCreate
leaq 16(%rsp), %rdi
callq hipEventCreate
movq 24(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movabsq $34359738376, %rdi # imm = 0x800000008
movabsq $68719476752, %rdx # imm = 0x1000000010
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB0_7
# %bb.6:
movups 48(%rsp), %xmm0
movups 32(%rsp), %xmm1
movq 8(%rsp), %rax
movups %xmm0, 168(%rsp)
movups %xmm1, 152(%rsp)
movq %rax, 144(%rsp)
leaq 168(%rsp), %rax
movq %rax, 64(%rsp)
leaq 152(%rsp), %rax
movq %rax, 72(%rsp)
leaq 144(%rsp), %rax
movq %rax, 80(%rsp)
leaq 128(%rsp), %rdi
leaq 112(%rsp), %rsi
leaq 104(%rsp), %rdx
leaq 96(%rsp), %rcx
callq __hipPopCallConfiguration
movq 128(%rsp), %rsi
movl 136(%rsp), %edx
movq 112(%rsp), %rcx
movl 120(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z10GridKernel9DataFrameS_Pj, %edi
pushq 96(%rsp)
.cfi_adjust_cfa_offset 8
pushq 112(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB0_7:
callq hipDeviceSynchronize
movl %eax, %ebp
movq 16(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 16(%rsp), %rdi
callq hipEventSynchronize
movq 24(%rsp), %rsi
movq 16(%rsp), %rdx
leaq 64(%rsp), %rdi
callq hipEventElapsedTime
testl %ebp, %ebp
je .LBB0_9
# %bb.8:
movl %ebp, %edi
callq hipGetErrorString
movl $.L.str.4, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
.LBB0_9:
movss 64(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.5, %edi
movb $1, %al
callq printf
movq 40(%rsp), %rsi
movq %rbx, %rdi
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
je .LBB0_11
# %bb.10:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.6, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
.LBB0_11:
movq 8(%rsp), %rsi
leaq 4(%rsp), %rdi
movl $4, %edx
movl $2, %ecx
callq hipMemcpy
movl 4(%rsp), %esi
movl $.L.str.7, %edi
xorl %eax, %eax
callq printf
movq 56(%rsp), %rdi
callq hipFree
movq 40(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movss 64(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
addq $184, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z9CircleFit9DataFrameS_, .Lfunc_end0-_Z9CircleFit9DataFrameS_
.cfi_endproc
# -- End function
# HIP device stub for GridKernel(DataFrame, DataFrame, unsigned*).
# Spills the three kernel arguments to the stack, builds the argument-pointer
# array expected by hipLaunchKernel, pops the launch configuration recorded by
# the <<<...>>> call site (__hipPopCallConfiguration), then launches the kernel.
.globl _Z25__device_stub__GridKernel9DataFrameS_Pj # -- Begin function _Z25__device_stub__GridKernel9DataFrameS_Pj
.p2align 4, 0x90
.type _Z25__device_stub__GridKernel9DataFrameS_Pj,@function
_Z25__device_stub__GridKernel9DataFrameS_Pj: # @_Z25__device_stub__GridKernel9DataFrameS_Pj
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
# Spill arguments: DataFrame #1 (rdi:rsi), DataFrame #2 (rdx:rcx), unsigned* (r8).
movq %rdi, 80(%rsp)
movq %rsi, 88(%rsp)
movq %rdx, 64(%rsp)
movq %rcx, 72(%rsp)
movq %r8, 56(%rsp)
# Kernel-argument pointer array at 96(%rsp): {&arg0, &arg1, &arg2}.
leaq 80(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 56(%rsp), %rax
movq %rax, 112(%rsp)
# Retrieve grid dim, block dim, shared-mem size and stream pushed at the call site.
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z10GridKernel9DataFrameS_Pj, %edi
# Shared-mem size and stream are passed on the stack to hipLaunchKernel.
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size _Z25__device_stub__GridKernel9DataFrameS_Pj, .Lfunc_end1-_Z25__device_stub__GridKernel9DataFrameS_Pj
.cfi_endproc
# -- End function
# dump(DataFrame): prints the matrix carried by the DataFrame.
# The DataFrame is passed packed in two registers: rdi holds two 32-bit
# dimensions (low half = inner/column count in r14d, high half = outer/row
# count in 16(%rsp)); rsi holds the float* data pointer.
# Each element is printed with "%3.1f\t" (.L.str.8), each row ends with
# putchar('\n'), and a final '\n' is emitted after all rows.
.globl _Z4dump9DataFrame # -- Begin function _Z4dump9DataFrame
.p2align 4, 0x90
.type _Z4dump9DataFrame,@function
_Z4dump9DataFrame: # @_Z4dump9DataFrame
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $24, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, 8(%rsp) # 8-byte Spill
# Row count = high 32 bits of the packed first argument.
movq %rdi, %rax
shrq $32, %rax
movq %rax, 16(%rsp) # 8-byte Spill
testl %eax, %eax
jle .LBB2_6
# %bb.1: # %.preheader.lr.ph
# r14d = column count (low 32 bits), r13 = running flat offset, rbp = row index.
movq %rdi, %r14
movl %r14d, %r12d
xorl %r13d, %r13d
xorl %ebp, %ebp
jmp .LBB2_2
.p2align 4, 0x90
.LBB2_5: # %._crit_edge
# in Loop: Header=BB2_2 Depth=1
# End of row: emit '\n' and advance the flat offset by one row.
movl $10, %edi
callq putchar@PLT
incq %rbp
addl %r14d, %r13d
cmpq 16(%rsp), %rbp # 8-byte Folded Reload
je .LBB2_6
.LBB2_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB2_4 Depth 2
testl %r14d, %r14d
jle .LBB2_5
# %bb.3: # %.lr.ph
# in Loop: Header=BB2_2 Depth=1
movl %r13d, %eax
movq 8(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %rbx
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB2_4: # Parent Loop BB2_2 Depth=1
# => This Inner Loop Header: Depth=2
# printf("%3.1f\t", (double)data[row*cols + col])
movss (%rbx,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.8, %edi
movb $1, %al
callq printf
incq %r15
cmpq %r15, %r12
jne .LBB2_4
jmp .LBB2_5
.LBB2_6: # %._crit_edge11
# Trailing newline emitted via tail call.
movl $10, %edi
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
jmp putchar@PLT # TAILCALL
.Lfunc_end2:
.size _Z4dump9DataFrame, .Lfunc_end2-_Z4dump9DataFrame
.cfi_endproc
# -- End function
# readBin(std::vector<float>): compiled down to an empty function (single ret).
# The optimizer evidently removed the body — presumably the by-value parameter
# and/or result were unused at this call boundary; verify against the source.
.globl _Z7readBinSt6vectorIfSaIfEE # -- Begin function _Z7readBinSt6vectorIfSaIfEE
.p2align 4, 0x90
.type _Z7readBinSt6vectorIfSaIfEE,@function
_Z7readBinSt6vectorIfSaIfEE: # @_Z7readBinSt6vectorIfSaIfEE
.cfi_startproc
# %bb.0:
retq
.Lfunc_end3:
.size _Z7readBinSt6vectorIfSaIfEE, .Lfunc_end3-_Z7readBinSt6vectorIfSaIfEE
.cfi_endproc
# -- End function
# main (HIP/clang build):
#  1. Opens "file.dat" (.L.str.10) as std::ifstream in binary mode (mode 4).
#  2. Loops reading 4 bytes at a time (istream::read) and pushes each float
#     into a std::vector<float>; push_back's grow path (_M_realloc_insert)
#     is inlined as BB4_6..BB4_21.
#  3. Closes the stream, malloc's 180 bytes for the result, and calls
#     CircleFit(DataFrame{data,...}, DataFrame{...}) with both packed
#     DataFrame descriptors in registers (rdx = 0xF00000003, i.e. dims 3 x 15).
#  4. Prints the 15 x 3 result with "%3.1f\t" per element (inlined dump()).
#  5. Destroys the ifstream/ios_base, frees the vector buffer, returns 0.
# Exception edges (.Ltmp*/LBB4_34..42) unwind-clean the stream and vector.
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.Lfunc_begin0:
.cfi_startproc
.cfi_personality 3, __gxx_personality_v0
.cfi_lsda 3, .Lexception0
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $536, %rsp # imm = 0x218
.cfi_def_cfa_offset 592
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
.Ltmp0:
# ifstream ctor: ifstream(&buf@16(%rsp), "file.dat", ios::binary /*4*/)
leaq 16(%rsp), %rdi
movl $.L.str.10, %esi
movl $4, %edx
callq _ZNSt14basic_ifstreamIcSt11char_traitsIcEEC1EPKcSt13_Ios_Openmode
.Ltmp1:
# %bb.1: # %.preheader
# Test eofbit (bit 2 of the iostate located via the vtable's -24 offset).
movq 16(%rsp), %rax
movq -24(%rax), %rax
xorl %ebx, %ebx
testb $2, 48(%rsp,%rax)
movl $0, %r14d
jne .LBB4_23
# %bb.2: # %.lr.ph.preheader
# Vector state kept in registers: rbx = begin, r14 = end, rbp = capacity end.
xorl %ebp, %ebp
leaq 12(%rsp), %r12
xorl %r14d, %r14d
xorl %ebx, %ebx
jmp .LBB4_3
.p2align 4, 0x90
.LBB4_5: # in Loop: Header=BB4_3 Depth=1
# Fast path push_back: store the float just read at *end.
movss 12(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss %xmm0, (%r14)
.LBB4_22: # %_ZNSt6vectorIfSaIfEE9push_backERKf.exit
# in Loop: Header=BB4_3 Depth=1
addq $4, %r14
movq 16(%rsp), %rax
movq -24(%rax), %rax
testb $2, 48(%rsp,%rax)
jne .LBB4_23
.LBB4_3: # %.lr.ph
# =>This Inner Loop Header: Depth=1
.Ltmp3:
# istream::read(&tmp, 4) — one float per iteration.
movl $4, %edx
leaq 16(%rsp), %rdi
movq %r12, %rsi
callq _ZNSi4readEPcl
.Ltmp4:
# %bb.4: # in Loop: Header=BB4_3 Depth=1
cmpq %rbp, %r14
jne .LBB4_5
# %bb.6: # in Loop: Header=BB4_3 Depth=1
# Slow path: inlined vector<float>::_M_realloc_insert (grow + copy + free).
subq %rbx, %r14
movabsq $9223372036854775804, %rax # imm = 0x7FFFFFFFFFFFFFFC
cmpq %rax, %r14
je .LBB4_7
# %bb.9: # %_ZNKSt6vectorIfSaIfEE12_M_check_lenEmPKc.exit.i.i
# in Loop: Header=BB4_3 Depth=1
movq %r14, %r15
sarq $2, %r15
cmpq $1, %r15
movq %r15, %rax
adcq $0, %rax
leaq (%rax,%r15), %rcx
movabsq $2305843009213693951, %rbp # imm = 0x1FFFFFFFFFFFFFFF
cmpq %rbp, %rcx
jae .LBB4_10
# %bb.11: # %_ZNKSt6vectorIfSaIfEE12_M_check_lenEmPKc.exit.i.i
# in Loop: Header=BB4_3 Depth=1
addq %r15, %rax
jae .LBB4_12
.LBB4_13: # %_ZNKSt6vectorIfSaIfEE12_M_check_lenEmPKc.exit.i.i
# in Loop: Header=BB4_3 Depth=1
testq %rbp, %rbp
je .LBB4_14
.LBB4_15: # in Loop: Header=BB4_3 Depth=1
leaq (,%rbp,4), %rdi
.Ltmp5:
callq _Znwm
.Ltmp6:
# %bb.16: # in Loop: Header=BB4_3 Depth=1
movq %rax, %r13
jmp .LBB4_17
.LBB4_10: # %_ZNKSt6vectorIfSaIfEE12_M_check_lenEmPKc.exit.i.i
# in Loop: Header=BB4_3 Depth=1
movq %rbp, %rcx
addq %r15, %rax
jb .LBB4_13
.LBB4_12: # %_ZNKSt6vectorIfSaIfEE12_M_check_lenEmPKc.exit.i.i
# in Loop: Header=BB4_3 Depth=1
movq %rcx, %rbp
testq %rbp, %rbp
jne .LBB4_15
.LBB4_14: # in Loop: Header=BB4_3 Depth=1
xorl %r13d, %r13d
.LBB4_17: # %_ZNSt12_Vector_baseIfSaIfEE11_M_allocateEm.exit.i.i
# in Loop: Header=BB4_3 Depth=1
# Place the new element, relocate old contents, free old buffer.
movss 12(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss %xmm0, (%r13,%r15,4)
testq %r14, %r14
jle .LBB4_19
# %bb.18: # in Loop: Header=BB4_3 Depth=1
movq %r13, %rdi
movq %rbx, %rsi
movq %r14, %rdx
callq memmove@PLT
.LBB4_19: # %_ZNSt6vectorIfSaIfEE11_S_relocateEPfS2_S2_RS0_.exit.i.i
# in Loop: Header=BB4_3 Depth=1
testq %rbx, %rbx
je .LBB4_21
# %bb.20: # in Loop: Header=BB4_3 Depth=1
movq %rbx, %rdi
callq _ZdlPv
.LBB4_21: # %_ZNSt6vectorIfSaIfEE17_M_realloc_insertIJRKfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_.exit.i
# in Loop: Header=BB4_3 Depth=1
addq %r13, %r14
leaq (,%rbp,4), %rbp
addq %r13, %rbp
movq %r13, %rbx
jmp .LBB4_22
.LBB4_23: # %._crit_edge
# Inlined ifstream::close(): filebuf::close(), set failbit (|4) on failure.
leaq 32(%rsp), %rdi
.Ltmp8:
callq _ZNSt13basic_filebufIcSt11char_traitsIcEE5closeEv
.Ltmp9:
# %bb.24: # %.noexc17
testq %rax, %rax
jne .LBB4_26
# %bb.25:
movq 16(%rsp), %rax
movq -24(%rax), %rax
leaq (%rsp,%rax), %rdi
addq $16, %rdi
movl 48(%rsp,%rax), %esi
orl $4, %esi
.Ltmp10:
callq _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate
.Ltmp11:
.LBB4_26: # %_ZNSt14basic_ifstreamIcSt11char_traitsIcEE5closeEv.exit
# Pack element count into the high 32 bits of the first DataFrame word;
# low word = 2 (columns). rdx = 0xF00000003 packs the second DataFrame (3,15).
subq %rbx, %r14
andq $-8, %r14
shlq $29, %r14
orq $2, %r14
movl $180, %edi
callq malloc
movq %rax, %r15
.Ltmp13:
movabsq $64424509443, %rdx # imm = 0xF00000003
movq %r14, %rdi
movq %rbx, %rsi
movq %rax, %rcx
callq _Z9CircleFit9DataFrameS_
.Ltmp14:
# %bb.27: # %.preheader.i.preheader
# Inlined dump(): 15 rows x 3 columns from the 180-byte result buffer.
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB4_28: # %.preheader.i
# =>This Loop Header: Depth=1
# Child Loop BB4_29 Depth 2
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB4_29: # Parent Loop BB4_28 Depth=1
# => This Inner Loop Header: Depth=2
movss (%r15,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.8, %edi
movb $1, %al
callq printf
incq %r12
cmpq $3, %r12
jne .LBB4_29
# %bb.30: # %._crit_edge.i
# in Loop: Header=BB4_28 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r14
addq $12, %r15
cmpq $15, %r14
jne .LBB4_28
# %bb.31: # %_Z4dump9DataFrame.exit
movl $10, %edi
callq putchar@PLT
# Destructors: ifstream, ios_base, then free the vector's buffer.
leaq 16(%rsp), %rdi
movl $_ZTTSt14basic_ifstreamIcSt11char_traitsIcEE, %esi
callq _ZNSt14basic_ifstreamIcSt11char_traitsIcEED2Ev
leaq 272(%rsp), %rdi
callq _ZNSt8ios_baseD2Ev
testq %rbx, %rbx
je .LBB4_33
# %bb.32:
movq %rbx, %rdi
callq _ZdlPv
.LBB4_33: # %_ZNSt6vectorIfSaIfEED2Ev.exit
xorl %eax, %eax
addq $536, %rsp # imm = 0x218
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB4_7:
.cfi_def_cfa_offset 592
.Ltmp16:
# vector overflow: throw std::length_error("vector::_M_realloc_insert").
movl $.L.str.11, %edi
callq _ZSt20__throw_length_errorPKc
.Ltmp17:
# %bb.8: # %.noexc
.LBB4_38:
.Ltmp15:
jmp .LBB4_39
.LBB4_34:
.Ltmp2:
movq %rax, %r14
xorl %ebx, %ebx
jmp .LBB4_40
.LBB4_35:
.Ltmp12:
jmp .LBB4_39
.LBB4_37: # %.loopexit.split-lp
.Ltmp18:
jmp .LBB4_39
.LBB4_36: # %.loopexit
.Ltmp7:
.LBB4_39:
# Unwind path: destroy the ifstream/ios_base then fall through to the vector cleanup.
movq %rax, %r14
leaq 16(%rsp), %rdi
movl $_ZTTSt14basic_ifstreamIcSt11char_traitsIcEE, %esi
callq _ZNSt14basic_ifstreamIcSt11char_traitsIcEED2Ev
leaq 272(%rsp), %rdi
callq _ZNSt8ios_baseD2Ev
.LBB4_40:
testq %rbx, %rbx
je .LBB4_42
# %bb.41:
movq %rbx, %rdi
callq _ZdlPv
.LBB4_42: # %_ZNSt6vectorIfSaIfEED2Ev.exit21
movq %r14, %rdi
callq _Unwind_Resume@PLT
.Lfunc_end4:
.size main, .Lfunc_end4-main
.cfi_endproc
# LSDA (exception call-site table) for main; entries map the .Ltmp* call
# ranges above to their cleanup landing pads.
.section .gcc_except_table,"a",@progbits
.p2align 2, 0x0
GCC_except_table4:
.Lexception0:
.byte 255 # @LPStart Encoding = omit
.byte 255 # @TType Encoding = omit
.byte 1 # Call site Encoding = uleb128
.uleb128 .Lcst_end0-.Lcst_begin0
.Lcst_begin0:
.uleb128 .Ltmp0-.Lfunc_begin0 # >> Call Site 1 <<
.uleb128 .Ltmp1-.Ltmp0 # Call between .Ltmp0 and .Ltmp1
.uleb128 .Ltmp2-.Lfunc_begin0 # jumps to .Ltmp2
.byte 0 # On action: cleanup
.uleb128 .Ltmp3-.Lfunc_begin0 # >> Call Site 2 <<
.uleb128 .Ltmp6-.Ltmp3 # Call between .Ltmp3 and .Ltmp6
.uleb128 .Ltmp7-.Lfunc_begin0 # jumps to .Ltmp7
.byte 0 # On action: cleanup
.uleb128 .Ltmp6-.Lfunc_begin0 # >> Call Site 3 <<
.uleb128 .Ltmp8-.Ltmp6 # Call between .Ltmp6 and .Ltmp8
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.uleb128 .Ltmp8-.Lfunc_begin0 # >> Call Site 4 <<
.uleb128 .Ltmp11-.Ltmp8 # Call between .Ltmp8 and .Ltmp11
.uleb128 .Ltmp12-.Lfunc_begin0 # jumps to .Ltmp12
.byte 0 # On action: cleanup
.uleb128 .Ltmp13-.Lfunc_begin0 # >> Call Site 5 <<
.uleb128 .Ltmp14-.Ltmp13 # Call between .Ltmp13 and .Ltmp14
.uleb128 .Ltmp15-.Lfunc_begin0 # jumps to .Ltmp15
.byte 0 # On action: cleanup
.uleb128 .Ltmp16-.Lfunc_begin0 # >> Call Site 6 <<
.uleb128 .Ltmp17-.Ltmp16 # Call between .Ltmp16 and .Ltmp17
.uleb128 .Ltmp18-.Lfunc_begin0 # jumps to .Ltmp18
.byte 0 # On action: cleanup
.uleb128 .Ltmp17-.Lfunc_begin0 # >> Call Site 7 <<
.uleb128 .Lfunc_end4-.Ltmp17 # Call between .Ltmp17 and .Lfunc_end4
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.Lcst_end0:
.p2align 2, 0x0
# -- End function
.text
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB5_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB5_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10GridKernel9DataFrameS_Pj, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end5:
.size __hip_module_ctor, .Lfunc_end5-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB6_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB6_2:
retq
.Lfunc_end6:
.size __hip_module_dtor, .Lfunc_end6-__hip_module_dtor
.cfi_endproc
# -- End function
# Read-only data for the HIP translation unit: printf format strings, the
# kernel handle (pointer to the device stub), the fat-binary wrapper record,
# the module handle slot, the .init_array ctor entry, and address-significance
# markers.
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "CUDA malloc data DataFrame: %s\n"
.size .L.str, 32
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "CUDA malloc circles DataFrame: %s\n"
.size .L.str.1, 35
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "CUDA malloc counter variable: %s\n"
.size .L.str.2, 34
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "initialized counter : %u\n"
.size .L.str.3, 26
# Kernel handle: a pointer-sized object whose address identifies the kernel
# to the runtime; it stores the address of the host-side device stub.
.type _Z10GridKernel9DataFrameS_Pj,@object # @_Z10GridKernel9DataFrameS_Pj
.section .rodata,"a",@progbits
.globl _Z10GridKernel9DataFrameS_Pj
.p2align 3, 0x0
_Z10GridKernel9DataFrameS_Pj:
.quad _Z25__device_stub__GridKernel9DataFrameS_Pj
.size _Z10GridKernel9DataFrameS_Pj, 8
.type .L.str.4,@object # @.str.4
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.4:
.asciz "Run kernel: %s\n"
.size .L.str.4, 16
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "Time: %3.5f ms\n"
.size .L.str.5, 16
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "Copy circles off of device: %s\n"
.size .L.str.6, 32
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "counter : %u\n"
.size .L.str.7, 14
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "%3.1f\t"
.size .L.str.8, 7
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "file.dat"
.size .L.str.10, 9
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "vector::_M_realloc_insert"
.size .L.str.11, 26
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z10GridKernel9DataFrameS_Pj"
.size .L__unnamed_1, 29
# Fat-binary wrapper: {magic 0x48495046 ("HIPF"), version 1, data ptr, 0}.
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
# Register __hip_module_ctor to run before main.
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__GridKernel9DataFrameS_Pj
.addrsig_sym __gxx_personality_v0
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Unwind_Resume
.addrsig_sym _Z10GridKernel9DataFrameS_Pj
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
# ---- Second translation unit (nvcc/GCC host compile) begins here ----
.file "tmpxft_0004840e_00000000-6_reticolo.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
# Internal atexit hook (registered by __sti____cudaRegisterAll below):
# unregisters the CUDA fat binary stored in _ZL20__cudaFatCubinHandle.
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB4184:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4184:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
# The next five functions (log_msg, Distance2, GetElement, SetElement,
# HstFill) are host-side bodies that unconditionally call exit(1).
# NOTE(review): this is the shape cudafe emits for functions that are only
# meaningful as device code — calling them from the host aborts the program.
# Confirm against the original source before relying on that interpretation.
.globl _Z7log_msgPKc
.type _Z7log_msgPKc, @function
_Z7log_msgPKc:
.LFB4164:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE4164:
.size _Z7log_msgPKc, .-_Z7log_msgPKc
.globl _Z9Distance2ffff
.type _Z9Distance2ffff, @function
_Z9Distance2ffff:
.LFB4165:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE4165:
.size _Z9Distance2ffff, .-_Z9Distance2ffff
.globl _Z10GetElement9DataFrameii
.type _Z10GetElement9DataFrameii, @function
_Z10GetElement9DataFrameii:
.LFB4166:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE4166:
.size _Z10GetElement9DataFrameii, .-_Z10GetElement9DataFrameii
.globl _Z10SetElement9DataFrameiif
.type _Z10SetElement9DataFrameiif, @function
_Z10SetElement9DataFrameiif:
.LFB4167:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE4167:
.size _Z10SetElement9DataFrameiif, .-_Z10SetElement9DataFrameiif
.globl _Z7HstFillii9DataFrameS_Pj
.type _Z7HstFillii9DataFrameS_Pj, @function
_Z7HstFillii9DataFrameS_Pj:
.LFB4168:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE4168:
.size _Z7HstFillii9DataFrameS_Pj, .-_Z7HstFillii9DataFrameS_Pj
# GCC build of dump(DataFrame): same contract as the clang version above —
# prints rows x cols floats ("%3.1f\t" = .LC0), newline per row (.LC1) and a
# trailing newline. Dimensions arrive packed in rdi (high 32 bits = rows,
# low = cols); rsi = float* data. printf is routed through __printf_chk.
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%3.1f\t"
.LC1:
.string "\n"
.text
.globl _Z4dump9DataFrame
.type _Z4dump9DataFrame, @function
_Z4dump9DataFrame:
.LFB4169:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
# 16(%rsp) = row count (high half of rdi), 12(%rsp)/20(%rsp) = column count.
movq %rdi, %rax
sarq $32, %rax
movl %eax, 16(%rsp)
movl %edi, 12(%rsp)
testl %eax, %eax
jle .L14
movq %rsi, %r12
movl %edi, 20(%rsp)
movl $0, %r15d
movl $0, %r14d
movslq %edi, %rax
movq %rax, 24(%rsp)
leaq .LC0(%rip), %r13
jmp .L15
.L17:
# rbx = byte offset of row start, rbp = byte offset one row past it.
movslq %r15d, %rbp
leaq 0(,%rbp,4), %rbx
movq 24(%rsp), %rax
addq %rax, %rbp
salq $2, %rbp
.L16:
# printf("%3.1f\t", (double)data[off])
pxor %xmm0, %xmm0
cvtss2sd (%rbx,%r12), %xmm0
movq %r13, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %rbp, %rbx
jne .L16
.L18:
# End of row: printf("\n") and advance to the next row.
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %r14d
movl 20(%rsp), %eax
addl %eax, %r15d
movl 16(%rsp), %eax
cmpl %eax, %r14d
je .L14
.L15:
cmpl $0, 12(%rsp)
jg .L17
jmp .L18
.L14:
# Trailing newline after the whole matrix.
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4169:
.size _Z4dump9DataFrame, .-_Z4dump9DataFrame
# readBin(std::vector<float>) — GCC build: also optimized to an empty body,
# matching the clang unit's no-op version of the same symbol.
.globl _Z7readBinSt6vectorIfSaIfEE
.type _Z7readBinSt6vectorIfSaIfEE, @function
_Z7readBinSt6vectorIfSaIfEE:
.LFB4170:
.cfi_startproc
endbr64
ret
.cfi_endproc
.LFE4170:
.size _Z7readBinSt6vectorIfSaIfEE, .-_Z7readBinSt6vectorIfSaIfEE
# nvcc-generated device stub for GridKernel: builds the kernel-argument
# pointer array, pops the <<<...>>> launch configuration, and calls
# cudaLaunchKernel. Protected by the glibc stack guard (%fs:40).
.globl _Z42__device_stub__Z10GridKernel9DataFrameS_PjR9DataFrameS0_Pj
.type _Z42__device_stub__Z10GridKernel9DataFrameS_PjR9DataFrameS0_Pj, @function
_Z42__device_stub__Z10GridKernel9DataFrameS_PjR9DataFrameS0_Pj:
.LFB4206:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
# Kernel args: two DataFrame references (rdi, rsi) and &counter (8(%rsp)).
movq %rdi, 80(%rsp)
movq %rsi, 88(%rsp)
leaq 8(%rsp), %rax
movq %rax, 96(%rsp)
# Default dim3 fields initialized to 1 before popping the real configuration.
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration
testl %eax, %eax
je .L26
.L22:
# Stack-guard check before returning.
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L27
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L26:
.cfi_restore_state
# Push shared-mem size and stream, then launch.
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z10GridKernel9DataFrameS_Pj(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L22
.L27:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4206:
.size _Z42__device_stub__Z10GridKernel9DataFrameS_PjR9DataFrameS0_Pj, .-_Z42__device_stub__Z10GridKernel9DataFrameS_PjR9DataFrameS0_Pj
# Host-callable entry for GridKernel<<<...>>>: spills the by-value DataFrame
# arguments to the stack and forwards their addresses (plus the unsigned*)
# to the device stub above.
.globl _Z10GridKernel9DataFrameS_Pj
.type _Z10GridKernel9DataFrameS_Pj, @function
_Z10GridKernel9DataFrameS_Pj:
.LFB4207:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
movq %rdi, 16(%rsp)
movq %rsi, 24(%rsp)
movq %rdx, (%rsp)
movq %rcx, 8(%rsp)
movq %r8, %rdx
movq %rsp, %rsi
leaq 16(%rsp), %rdi
call _Z42__device_stub__Z10GridKernel9DataFrameS_PjR9DataFrameS0_Pj
addq $40, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4207:
.size _Z10GridKernel9DataFrameS_Pj, .-_Z10GridKernel9DataFrameS_Pj
# Status/format strings used by CircleFit below.
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "CUDA malloc data DataFrame: %s\n"
.align 8
.LC3:
.string "CUDA malloc circles DataFrame: %s\n"
.align 8
.LC4:
.string "CUDA malloc counter variable: %s\n"
.section .rodata.str1.1
.LC5:
.string "initialized counter : %u\n"
.LC6:
.string "Run kernel: %s\n"
.LC7:
.string "Time: %3.5f ms\n"
.section .rodata.str1.8
.align 8
.LC8:
.string "Copy circles off of device: %s\n"
.section .rodata.str1.1
.LC9:
.string "counter : %u\n"
.text
# CircleFit(DataFrame data, DataFrame circles) -> float (elapsed ms in xmm0).
# Flow, as visible below:
#  1. cudaMalloc device buffer for `data` (rows*cols*4 bytes), H2D cudaMemcpy.
#  2. cudaMalloc device buffer for `circles`; cudaMallocManaged a 4-byte
#     counter, cudaMemset it to 0, copy it back and print it (.LC5).
#  3. Create two events, record start, push the launch configuration
#     (dim3 constants 8,8 and 16,16 — which is grid vs. block is not visible
#     here; confirm against the source) and launch GridKernel via the stub.
#  4. cudaDeviceSynchronize, record stop, cudaEventElapsedTime, print .LC7.
#  5. D2H copy of circles into the caller's buffer, copy+print the counter,
#     free all three device allocations, return the elapsed-time float.
# Each cudaMalloc/launch/D2H copy error path prints cudaGetErrorString.
.globl _Z9CircleFit9DataFrameS_
.type _Z9CircleFit9DataFrameS_, @function
_Z9CircleFit9DataFrameS_:
.LFB4163:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $144, %rsp
.cfi_def_cfa_offset 192
movq %rsi, %r13
movq %rdx, %rbx
movq %rcx, %r12
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
# Unpack data DataFrame dims (rdi) and compute the byte size rows*cols*4.
movq %rdi, %rax
sarq $32, %rax
movq %rdx, %r14
sarq $32, %r14
movl %edi, 64(%rsp)
movl %eax, 68(%rsp)
imull %eax, %edi
movslq %edi, %rbp
salq $2, %rbp
leaq 72(%rsp), %rdi
movq %rbp, %rsi
call cudaMalloc@PLT
testl %eax, %eax
jne .L40
.L31:
# H2D copy of the input data (cudaMemcpyHostToDevice = 1).
movl $1, %ecx
movq %rbp, %rdx
movq %r13, %rsi
movq 72(%rsp), %rdi
call cudaMemcpy@PLT
movl %ebx, 80(%rsp)
movl %r14d, 84(%rsp)
imull %r14d, %ebx
movslq %ebx, %rbx
salq $2, %rbx
leaq 88(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl %eax, %ebp
testl %eax, %eax
jne .L41
# Managed 4-byte counter (cudaMemAttachGlobal = 1).
leaq 16(%rsp), %rdi
movl $1, %edx
movl $4, %esi
call cudaMallocManaged@PLT
.L36:
# Zero the counter, read it back (cudaMemcpyDeviceToHost = 2) and print it.
movl $4, %edx
movl $0, %esi
movq 16(%rsp), %rdi
call cudaMemset@PLT
leaq 12(%rsp), %rdi
movl $2, %ecx
movl $4, %edx
movq 16(%rsp), %rsi
call cudaMemcpy@PLT
movl 12(%rsp), %edx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
# Timing events around the kernel launch.
leaq 24(%rsp), %rdi
call cudaEventCreate@PLT
leaq 32(%rsp), %rdi
call cudaEventCreate@PLT
movl $0, %esi
movq 24(%rsp), %rdi
call cudaEventRecord@PLT
# Launch configuration: dim3 (8,8,1) at 52(%rsp), dim3 (16,16,1) at 40(%rsp).
movl $8, 52(%rsp)
movl $8, 56(%rsp)
movl $16, 40(%rsp)
movl $16, 44(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 40(%rsp), %rdx
movl $1, %ecx
movq 52(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L42
.L33:
# Wait for the kernel, record stop, compute elapsed milliseconds.
call cudaDeviceSynchronize@PLT
movl %eax, %ebp
movl $0, %esi
movq 32(%rsp), %rdi
call cudaEventRecord@PLT
movq 32(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 112(%rsp), %rdi
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
call cudaEventElapsedTime@PLT
testl %ebp, %ebp
jne .L43
.L34:
pxor %xmm0, %xmm0
cvtss2sd 112(%rsp), %xmm0
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
# D2H copy of the circles result into the caller-provided buffer (r12).
movl $2, %ecx
movq %rbx, %rdx
movq 88(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
testl %eax, %eax
jne .L44
.L35:
# Read back and print the final counter, then release device memory.
leaq 12(%rsp), %rdi
movl $2, %ecx
movl $4, %edx
movq 16(%rsp), %rsi
call cudaMemcpy@PLT
movl 12(%rsp), %edx
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 72(%rsp), %rdi
call cudaFree@PLT
movq 88(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
# Return value: the elapsed time (float) in xmm0.
movss 112(%rsp), %xmm0
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L45
addq $144, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L40:
.cfi_restore_state
# cudaMalloc(data) failed: report and continue (.LC2).
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L31
.L41:
# cudaMalloc(circles) failed: report (.LC3), still allocate the counter,
# then report the saved malloc status again with .LC4.
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 16(%rsp), %rdi
movl $1, %edx
movl $4, %esi
call cudaMallocManaged@PLT
movl %ebp, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L36
.L42:
# Configuration accepted: copy both packed DataFrames to locals and launch.
movdqa 64(%rsp), %xmm1
movaps %xmm1, 96(%rsp)
movdqa 80(%rsp), %xmm2
movaps %xmm2, 112(%rsp)
leaq 112(%rsp), %rsi
leaq 96(%rsp), %rdi
movq 16(%rsp), %rdx
call _Z42__device_stub__Z10GridKernel9DataFrameS_PjR9DataFrameS0_Pj
jmp .L33
.L43:
# cudaDeviceSynchronize reported an error: print "Run kernel: %s".
movl %ebp, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L34
.L44:
# D2H copy of circles failed: print "Copy circles off of device: %s".
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L35
.L45:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4163:
.size _Z9CircleFit9DataFrameS_, .-_Z9CircleFit9DataFrameS_
.section .rodata.str1.1
.LC10:
.string "_Z10GridKernel9DataFrameS_Pj"
.text
# Static initializer (registered in .init_array below): registers the fat
# binary, registers GridKernel's host entry under its mangled name (.LC10),
# finalizes registration, and installs the unregister hook via atexit.
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB4209:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
# Four NULL stack arguments (thread/device limit pointers) for the call below.
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _Z10GridKernel9DataFrameS_Pj(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4209:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
# Run the registration initializer before main.
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
# COMDAT instantiation of std::vector<float>::_M_realloc_insert(iterator, const float&):
# the push_back grow path. Computes the new capacity (doubling, clamped to
# max_size = 0x1FFFFFFFFFFFFFFF elements, throwing length_error via .LC11 on
# overflow), allocates, constructs the new element at the insertion point,
# relocates the old prefix/suffix with memmove/memcpy, frees the old buffer,
# and updates the vector's begin/end/capacity pointers.
.section .rodata._ZNSt6vectorIfSaIfEE17_M_realloc_insertIJRKfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_.str1.1,"aMS",@progbits,1
.LC11:
.string "vector::_M_realloc_insert"
.section .text._ZNSt6vectorIfSaIfEE17_M_realloc_insertIJRKfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_,"axG",@progbits,_ZNSt6vectorIfSaIfEE17_M_realloc_insertIJRKfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_,comdat
.align 2
.weak _ZNSt6vectorIfSaIfEE17_M_realloc_insertIJRKfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_
.type _ZNSt6vectorIfSaIfEE17_M_realloc_insertIJRKfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_, @function
_ZNSt6vectorIfSaIfEE17_M_realloc_insertIJRKfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_:
.LFB4729:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $24, %rsp
.cfi_def_cfa_offset 80
# (%rsp) = insertion iterator, 8(%rsp) = &value; r13 = begin, rbp = end.
movq %rsi, (%rsp)
movq %rdx, 8(%rsp)
movq 8(%rdi), %rbp
movq (%rdi), %r13
movq %rbp, %rax
subq %r13, %rax
sarq $2, %rax
movabsq $2305843009213693951, %rdx
cmpq %rdx, %rax
je .L65
movq %rdi, %rbx
# new_len = size + max(size, 1); clamp to max_size; r14 = new capacity.
cmpq %r13, %rbp
movl $1, %edx
cmovne %rax, %rdx
addq %rdx, %rax
jc .L51
movabsq $2305843009213693951, %r14
cmpq %r14, %rax
cmovbe %rax, %r14
movq (%rsp), %r15
subq %r13, %r15
movl $0, %r12d
testq %rax, %rax
je .L52
jmp .L59
.L65:
# size() == max_size(): throw std::length_error(.LC11).
leaq .LC11(%rip), %rdi
call _ZSt20__throw_length_errorPKc@PLT
.L66:
# Relocate the prefix [begin, pos) into the new buffer.
movq %r15, %rdx
movq %r13, %rsi
movq %r12, %rdi
call memmove@PLT
leaq 4(%r12,%r15), %r15
movq (%rsp), %rax
subq %rax, %rbp
testq %rbp, %rbp
jg .L54
addq %rbp, %r15
movq 16(%rbx), %rsi
subq %r13, %rsi
jmp .L58
.L51:
movq (%rsp), %r15
subq %r13, %r15
movabsq $2305843009213693951, %r14
.L59:
# operator new(new_cap * sizeof(float)).
leaq 0(,%r14,4), %rdi
call _Znwm@PLT
movq %rax, %r12
.L52:
# Construct the new element at its slot in the new buffer.
movq 8(%rsp), %rax
movss (%rax), %xmm0
movss %xmm0, (%r12,%r15)
testq %r15, %r15
jg .L66
leaq 4(%r12,%r15), %r15
movq (%rsp), %rax
subq %rax, %rbp
testq %rbp, %rbp
jle .L56
.L54:
# Relocate the suffix [pos, end) after the new element.
movq %rbp, %rdx
movq (%rsp), %rsi
movq %r15, %rdi
call memcpy@PLT
.L56:
addq %rbp, %r15
testq %r13, %r13
je .L57
movq 16(%rbx), %rsi
subq %r13, %rsi
.L58:
# operator delete(old_begin, old_capacity_bytes).
movq %r13, %rdi
call _ZdlPvm@PLT
.L57:
# Commit the new begin/end/capacity pointers into the vector.
movq %r12, (%rbx)
movq %r15, 8(%rbx)
leaq (%r12,%r14,4), %rax
movq %rax, 16(%rbx)
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4729:
.size _ZNSt6vectorIfSaIfEE17_M_realloc_insertIJRKfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_, .-_ZNSt6vectorIfSaIfEE17_M_realloc_insertIJRKfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_
.section .rodata.str1.1
.LC12:
.string "file.dat"
.text
# main (GCC/nvcc build). Mirrors the clang unit's main:
#  1. Open "file.dat" (.LC12) as std::ifstream in binary mode (mode 4).
#  2. Read 4 bytes per iteration into a vector<float> (out-of-line
#     _M_realloc_insert handles the grow path at .L69).
#  3. Close the stream, malloc 180 bytes for the result, call
#     CircleFit(data DataFrame, circles DataFrame) with the same packed
#     0xF00000003 descriptor, then dump() the result.
#  4. Destroy the ifstream, free the vector buffer, return 0.
# Stack-guarded (%fs:40); .L77 is the cleanup landing pad for exceptions.
.globl main
.type main, @function
main:
.LFB4171:
.cfi_startproc
.cfi_personality 0x9b,DW.ref.__gxx_personality_v0
.cfi_lsda 0x1b,.LLSDA4171
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $576, %rsp
.cfi_def_cfa_offset 608
movq %fs:40, %rax
movq %rax, 568(%rsp)
xorl %eax, %eax
# Empty vector<float>: begin/end/cap at 16/24/32(%rsp) all null.
movq $0, 16(%rsp)
movq $0, 24(%rsp)
movq $0, 32(%rsp)
leaq 48(%rsp), %rdi
movl $4, %edx
leaq .LC12(%rip), %rsi
.LEHB0:
call _ZNSt14basic_ifstreamIcSt11char_traitsIcEEC1EPKcSt13_Ios_Openmode@PLT
.LEHE0:
# eofbit check before entering the read loop.
testb $2, 336(%rsp)
jne .L68
leaq 12(%rsp), %rbx
jmp .L71
.L81:
# Fast-path push_back: room available, store and bump end.
movq 24(%rsp), %rsi
cmpq 32(%rsp), %rsi
je .L69
movss 12(%rsp), %xmm0
movss %xmm0, (%rsi)
addq $4, %rsi
movq %rsi, 24(%rsp)
.L70:
testb $2, 336(%rsp)
jne .L68
.L71:
# istream::read(&tmp, 4): one float per iteration.
leaq 48(%rsp), %rdi
movl $4, %edx
movq %rbx, %rsi
.LEHB1:
call _ZNSi4readEPcl@PLT
jmp .L81
.L69:
# Capacity exhausted: grow via _M_realloc_insert.
leaq 16(%rsp), %rdi
movq %rbx, %rdx
call _ZNSt6vectorIfSaIfEE17_M_realloc_insertIJRKfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_
jmp .L70
.L68:
leaq 48(%rsp), %rdi
call _ZNSt14basic_ifstreamIcSt11char_traitsIcEE5closeEv@PLT
# Pack element count into the high half of the data DataFrame descriptor,
# low half = 2 columns; rbx packs size()/2 before shifting.
movq 16(%rsp), %rbp
movq 24(%rsp), %rbx
subq %rbp, %rbx
sarq $2, %rbx
shrq %rbx
movl $180, %edi
call malloc@PLT
movq %rax, %r12
salq $32, %rbx
movq %rbx, %rdi
orq $2, %rdi
# Circles DataFrame descriptor: 0xF00000003 (dims 3 x 15).
movabsq $64424509443, %rbx
movq %rbx, %rdx
movq %rax, %rcx
movq %rbp, %rsi
call _Z9CircleFit9DataFrameS_
movq %rbx, %rdi
movq %r12, %rsi
call _Z4dump9DataFrame
.LEHE1:
leaq 48(%rsp), %rdi
call _ZNSt14basic_ifstreamIcSt11char_traitsIcEED1Ev@PLT
testq %rbp, %rbp
je .L72
movq 32(%rsp), %rsi
subq %rbp, %rsi
movq %rbp, %rdi
call _ZdlPvm@PLT
.L72:
movq 568(%rsp), %rax
subq %fs:40, %rax
jne .L82
movl $0, %eax
addq $576, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L77:
.cfi_restore_state
# Landing pad: destroy the ifstream and the vector, then re-raise.
endbr64
movq %rax, %rbx
leaq 48(%rsp), %rdi
call _ZNSt14basic_ifstreamIcSt11char_traitsIcEED1Ev@PLT
movq 16(%rsp), %rdi
movq 32(%rsp), %rsi
subq %rdi, %rsi
testq %rdi, %rdi
je .L74
call _ZdlPvm@PLT
.L74:
movq 568(%rsp), %rax
subq %fs:40, %rax
je .L75
call __stack_chk_fail@PLT
.L75:
movq %rbx, %rdi
.LEHB2:
call _Unwind_Resume@PLT
.LEHE2:
.L82:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4171:
.globl __gxx_personality_v0
# LSDA for main: only the .LEHB1/.LEHE1 region has a landing pad (.L77).
.section .gcc_except_table,"a",@progbits
.LLSDA4171:
.byte 0xff
.byte 0xff
.byte 0x1
.uleb128 .LLSDACSE4171-.LLSDACSB4171
.LLSDACSB4171:
.uleb128 .LEHB0-.LFB4171
.uleb128 .LEHE0-.LEHB0
.uleb128 0
.uleb128 0
.uleb128 .LEHB1-.LFB4171
.uleb128 .LEHE1-.LEHB1
.uleb128 .L77-.LFB4171
.uleb128 0
.uleb128 .LEHB2-.LFB4171
.uleb128 .LEHE2-.LEHB2
.uleb128 0
.uleb128 0
.LLSDACSE4171:
.text
.size main, .-main
# CUDA fat-binary wrapper record: {magic 0x466243b1, version 1, &fatbinData, 0}.
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
# Module handle written by __cudaRegisterFatBinary in the initializer above.
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
# Indirect reference to the C++ personality routine for .eh_frame.
.hidden DW.ref.__gxx_personality_v0
.weak DW.ref.__gxx_personality_v0
.section .data.rel.local.DW.ref.__gxx_personality_v0,"awG",@progbits,DW.ref.__gxx_personality_v0,comdat
.align 8
.type DW.ref.__gxx_personality_v0, @object
.size DW.ref.__gxx_personality_v0, 8
DW.ref.__gxx_personality_v0:
.quad __gxx_personality_v0
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
# GNU property note advertising CET (IBT + SHSTK) support.
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "reticolo.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z9CircleFit9DataFrameS_ # -- Begin function _Z9CircleFit9DataFrameS_
.p2align 4, 0x90
.type _Z9CircleFit9DataFrameS_,@function
_Z9CircleFit9DataFrameS_: # @_Z9CircleFit9DataFrameS_
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $184, %rsp
.cfi_def_cfa_offset 240
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rcx, %rbx
movq %rdx, %r14
movq %rsi, %r15
movq %rdx, %r13
shrq $32, %r13
movq %rdi, 48(%rsp)
movq %rdi, %rax
shrq $32, %rax
imull %edi, %eax
movslq %eax, %r12
shlq $2, %r12
leaq 56(%rsp), %rdi
movq %r12, %rsi
callq hipMalloc
testl %eax, %eax
je .LBB0_2
# %bb.1:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
.LBB0_2:
movq 56(%rsp), %rdi
movq %r15, %rsi
movq %r12, %rdx
movl $1, %ecx
callq hipMemcpy
movl %r14d, 32(%rsp)
movl %r13d, 36(%rsp)
imull %r13d, %r14d
movslq %r14d, %r14
shlq $2, %r14
leaq 40(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
testl %eax, %eax
je .LBB0_4
# %bb.3:
movl %eax, %ebp
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.1, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
leaq 8(%rsp), %rdi
movl $4, %esi
movl $1, %edx
callq hipMallocManaged
movl %ebp, %edi
callq hipGetErrorString
movl $.L.str.2, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
jmp .LBB0_5
.LBB0_4: # %.critedge
leaq 8(%rsp), %rdi
movl $4, %esi
movl $1, %edx
callq hipMallocManaged
.LBB0_5:
movq 8(%rsp), %rdi
movl $4, %edx
xorl %esi, %esi
callq hipMemset
movq 8(%rsp), %rsi
leaq 4(%rsp), %rdi
movl $4, %edx
movl $2, %ecx
callq hipMemcpy
movl 4(%rsp), %esi
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
leaq 24(%rsp), %rdi
callq hipEventCreate
leaq 16(%rsp), %rdi
callq hipEventCreate
movq 24(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movabsq $34359738376, %rdi # imm = 0x800000008
movabsq $68719476752, %rdx # imm = 0x1000000010
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB0_7
# %bb.6:
movups 48(%rsp), %xmm0
movups 32(%rsp), %xmm1
movq 8(%rsp), %rax
movups %xmm0, 168(%rsp)
movups %xmm1, 152(%rsp)
movq %rax, 144(%rsp)
leaq 168(%rsp), %rax
movq %rax, 64(%rsp)
leaq 152(%rsp), %rax
movq %rax, 72(%rsp)
leaq 144(%rsp), %rax
movq %rax, 80(%rsp)
leaq 128(%rsp), %rdi
leaq 112(%rsp), %rsi
leaq 104(%rsp), %rdx
leaq 96(%rsp), %rcx
callq __hipPopCallConfiguration
movq 128(%rsp), %rsi
movl 136(%rsp), %edx
movq 112(%rsp), %rcx
movl 120(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z10GridKernel9DataFrameS_Pj, %edi
pushq 96(%rsp)
.cfi_adjust_cfa_offset 8
pushq 112(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB0_7:
callq hipDeviceSynchronize
movl %eax, %ebp
movq 16(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 16(%rsp), %rdi
callq hipEventSynchronize
movq 24(%rsp), %rsi
movq 16(%rsp), %rdx
leaq 64(%rsp), %rdi
callq hipEventElapsedTime
testl %ebp, %ebp
je .LBB0_9
# %bb.8:
movl %ebp, %edi
callq hipGetErrorString
movl $.L.str.4, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
.LBB0_9:
movss 64(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.5, %edi
movb $1, %al
callq printf
movq 40(%rsp), %rsi
movq %rbx, %rdi
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
je .LBB0_11
# %bb.10:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.6, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
.LBB0_11:
movq 8(%rsp), %rsi
leaq 4(%rsp), %rdi
movl $4, %edx
movl $2, %ecx
callq hipMemcpy
movl 4(%rsp), %esi
movl $.L.str.7, %edi
xorl %eax, %eax
callq printf
movq 56(%rsp), %rdi
callq hipFree
movq 40(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movss 64(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
addq $184, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z9CircleFit9DataFrameS_, .Lfunc_end0-_Z9CircleFit9DataFrameS_
.cfi_endproc
# -- End function
.globl _Z25__device_stub__GridKernel9DataFrameS_Pj # -- Begin function _Z25__device_stub__GridKernel9DataFrameS_Pj
.p2align 4, 0x90
.type _Z25__device_stub__GridKernel9DataFrameS_Pj,@function
_Z25__device_stub__GridKernel9DataFrameS_Pj: # @_Z25__device_stub__GridKernel9DataFrameS_Pj
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 80(%rsp)
movq %rsi, 88(%rsp)
movq %rdx, 64(%rsp)
movq %rcx, 72(%rsp)
movq %r8, 56(%rsp)
leaq 80(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 56(%rsp), %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z10GridKernel9DataFrameS_Pj, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size _Z25__device_stub__GridKernel9DataFrameS_Pj, .Lfunc_end1-_Z25__device_stub__GridKernel9DataFrameS_Pj
.cfi_endproc
# -- End function
.globl _Z4dump9DataFrame # -- Begin function _Z4dump9DataFrame
.p2align 4, 0x90
.type _Z4dump9DataFrame,@function
_Z4dump9DataFrame: # @_Z4dump9DataFrame
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $24, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, 8(%rsp) # 8-byte Spill
movq %rdi, %rax
shrq $32, %rax
movq %rax, 16(%rsp) # 8-byte Spill
testl %eax, %eax
jle .LBB2_6
# %bb.1: # %.preheader.lr.ph
movq %rdi, %r14
movl %r14d, %r12d
xorl %r13d, %r13d
xorl %ebp, %ebp
jmp .LBB2_2
.p2align 4, 0x90
.LBB2_5: # %._crit_edge
# in Loop: Header=BB2_2 Depth=1
movl $10, %edi
callq putchar@PLT
incq %rbp
addl %r14d, %r13d
cmpq 16(%rsp), %rbp # 8-byte Folded Reload
je .LBB2_6
.LBB2_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB2_4 Depth 2
testl %r14d, %r14d
jle .LBB2_5
# %bb.3: # %.lr.ph
# in Loop: Header=BB2_2 Depth=1
movl %r13d, %eax
movq 8(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %rbx
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB2_4: # Parent Loop BB2_2 Depth=1
# => This Inner Loop Header: Depth=2
movss (%rbx,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.8, %edi
movb $1, %al
callq printf
incq %r15
cmpq %r15, %r12
jne .LBB2_4
jmp .LBB2_5
.LBB2_6: # %._crit_edge11
movl $10, %edi
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
jmp putchar@PLT # TAILCALL
.Lfunc_end2:
.size _Z4dump9DataFrame, .Lfunc_end2-_Z4dump9DataFrame
.cfi_endproc
# -- End function
.globl _Z7readBinSt6vectorIfSaIfEE # -- Begin function _Z7readBinSt6vectorIfSaIfEE
.p2align 4, 0x90
.type _Z7readBinSt6vectorIfSaIfEE,@function
_Z7readBinSt6vectorIfSaIfEE: # @_Z7readBinSt6vectorIfSaIfEE
.cfi_startproc
# %bb.0:
retq
.Lfunc_end3:
.size _Z7readBinSt6vectorIfSaIfEE, .Lfunc_end3-_Z7readBinSt6vectorIfSaIfEE
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.Lfunc_begin0:
.cfi_startproc
.cfi_personality 3, __gxx_personality_v0
.cfi_lsda 3, .Lexception0
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $536, %rsp # imm = 0x218
.cfi_def_cfa_offset 592
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
.Ltmp0:
leaq 16(%rsp), %rdi
movl $.L.str.10, %esi
movl $4, %edx
callq _ZNSt14basic_ifstreamIcSt11char_traitsIcEEC1EPKcSt13_Ios_Openmode
.Ltmp1:
# %bb.1: # %.preheader
movq 16(%rsp), %rax
movq -24(%rax), %rax
xorl %ebx, %ebx
testb $2, 48(%rsp,%rax)
movl $0, %r14d
jne .LBB4_23
# %bb.2: # %.lr.ph.preheader
xorl %ebp, %ebp
leaq 12(%rsp), %r12
xorl %r14d, %r14d
xorl %ebx, %ebx
jmp .LBB4_3
.p2align 4, 0x90
.LBB4_5: # in Loop: Header=BB4_3 Depth=1
movss 12(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss %xmm0, (%r14)
.LBB4_22: # %_ZNSt6vectorIfSaIfEE9push_backERKf.exit
# in Loop: Header=BB4_3 Depth=1
addq $4, %r14
movq 16(%rsp), %rax
movq -24(%rax), %rax
testb $2, 48(%rsp,%rax)
jne .LBB4_23
.LBB4_3: # %.lr.ph
# =>This Inner Loop Header: Depth=1
.Ltmp3:
movl $4, %edx
leaq 16(%rsp), %rdi
movq %r12, %rsi
callq _ZNSi4readEPcl
.Ltmp4:
# %bb.4: # in Loop: Header=BB4_3 Depth=1
cmpq %rbp, %r14
jne .LBB4_5
# %bb.6: # in Loop: Header=BB4_3 Depth=1
subq %rbx, %r14
movabsq $9223372036854775804, %rax # imm = 0x7FFFFFFFFFFFFFFC
cmpq %rax, %r14
je .LBB4_7
# %bb.9: # %_ZNKSt6vectorIfSaIfEE12_M_check_lenEmPKc.exit.i.i
# in Loop: Header=BB4_3 Depth=1
movq %r14, %r15
sarq $2, %r15
cmpq $1, %r15
movq %r15, %rax
adcq $0, %rax
leaq (%rax,%r15), %rcx
movabsq $2305843009213693951, %rbp # imm = 0x1FFFFFFFFFFFFFFF
cmpq %rbp, %rcx
jae .LBB4_10
# %bb.11: # %_ZNKSt6vectorIfSaIfEE12_M_check_lenEmPKc.exit.i.i
# in Loop: Header=BB4_3 Depth=1
addq %r15, %rax
jae .LBB4_12
.LBB4_13: # %_ZNKSt6vectorIfSaIfEE12_M_check_lenEmPKc.exit.i.i
# in Loop: Header=BB4_3 Depth=1
testq %rbp, %rbp
je .LBB4_14
.LBB4_15: # in Loop: Header=BB4_3 Depth=1
leaq (,%rbp,4), %rdi
.Ltmp5:
callq _Znwm
.Ltmp6:
# %bb.16: # in Loop: Header=BB4_3 Depth=1
movq %rax, %r13
jmp .LBB4_17
.LBB4_10: # %_ZNKSt6vectorIfSaIfEE12_M_check_lenEmPKc.exit.i.i
# in Loop: Header=BB4_3 Depth=1
movq %rbp, %rcx
addq %r15, %rax
jb .LBB4_13
.LBB4_12: # %_ZNKSt6vectorIfSaIfEE12_M_check_lenEmPKc.exit.i.i
# in Loop: Header=BB4_3 Depth=1
movq %rcx, %rbp
testq %rbp, %rbp
jne .LBB4_15
.LBB4_14: # in Loop: Header=BB4_3 Depth=1
xorl %r13d, %r13d
.LBB4_17: # %_ZNSt12_Vector_baseIfSaIfEE11_M_allocateEm.exit.i.i
# in Loop: Header=BB4_3 Depth=1
movss 12(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss %xmm0, (%r13,%r15,4)
testq %r14, %r14
jle .LBB4_19
# %bb.18: # in Loop: Header=BB4_3 Depth=1
movq %r13, %rdi
movq %rbx, %rsi
movq %r14, %rdx
callq memmove@PLT
.LBB4_19: # %_ZNSt6vectorIfSaIfEE11_S_relocateEPfS2_S2_RS0_.exit.i.i
# in Loop: Header=BB4_3 Depth=1
testq %rbx, %rbx
je .LBB4_21
# %bb.20: # in Loop: Header=BB4_3 Depth=1
movq %rbx, %rdi
callq _ZdlPv
.LBB4_21: # %_ZNSt6vectorIfSaIfEE17_M_realloc_insertIJRKfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_.exit.i
# in Loop: Header=BB4_3 Depth=1
addq %r13, %r14
leaq (,%rbp,4), %rbp
addq %r13, %rbp
movq %r13, %rbx
jmp .LBB4_22
.LBB4_23: # %._crit_edge
leaq 32(%rsp), %rdi
.Ltmp8:
callq _ZNSt13basic_filebufIcSt11char_traitsIcEE5closeEv
.Ltmp9:
# %bb.24: # %.noexc17
testq %rax, %rax
jne .LBB4_26
# %bb.25:
movq 16(%rsp), %rax
movq -24(%rax), %rax
leaq (%rsp,%rax), %rdi
addq $16, %rdi
movl 48(%rsp,%rax), %esi
orl $4, %esi
.Ltmp10:
callq _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate
.Ltmp11:
.LBB4_26: # %_ZNSt14basic_ifstreamIcSt11char_traitsIcEE5closeEv.exit
subq %rbx, %r14
andq $-8, %r14
shlq $29, %r14
orq $2, %r14
movl $180, %edi
callq malloc
movq %rax, %r15
.Ltmp13:
movabsq $64424509443, %rdx # imm = 0xF00000003
movq %r14, %rdi
movq %rbx, %rsi
movq %rax, %rcx
callq _Z9CircleFit9DataFrameS_
.Ltmp14:
# %bb.27: # %.preheader.i.preheader
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB4_28: # %.preheader.i
# =>This Loop Header: Depth=1
# Child Loop BB4_29 Depth 2
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB4_29: # Parent Loop BB4_28 Depth=1
# => This Inner Loop Header: Depth=2
movss (%r15,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.8, %edi
movb $1, %al
callq printf
incq %r12
cmpq $3, %r12
jne .LBB4_29
# %bb.30: # %._crit_edge.i
# in Loop: Header=BB4_28 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r14
addq $12, %r15
cmpq $15, %r14
jne .LBB4_28
# %bb.31: # %_Z4dump9DataFrame.exit
movl $10, %edi
callq putchar@PLT
leaq 16(%rsp), %rdi
movl $_ZTTSt14basic_ifstreamIcSt11char_traitsIcEE, %esi
callq _ZNSt14basic_ifstreamIcSt11char_traitsIcEED2Ev
leaq 272(%rsp), %rdi
callq _ZNSt8ios_baseD2Ev
testq %rbx, %rbx
je .LBB4_33
# %bb.32:
movq %rbx, %rdi
callq _ZdlPv
.LBB4_33: # %_ZNSt6vectorIfSaIfEED2Ev.exit
xorl %eax, %eax
addq $536, %rsp # imm = 0x218
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB4_7:
.cfi_def_cfa_offset 592
.Ltmp16:
movl $.L.str.11, %edi
callq _ZSt20__throw_length_errorPKc
.Ltmp17:
# %bb.8: # %.noexc
.LBB4_38:
.Ltmp15:
jmp .LBB4_39
.LBB4_34:
.Ltmp2:
movq %rax, %r14
xorl %ebx, %ebx
jmp .LBB4_40
.LBB4_35:
.Ltmp12:
jmp .LBB4_39
.LBB4_37: # %.loopexit.split-lp
.Ltmp18:
jmp .LBB4_39
.LBB4_36: # %.loopexit
.Ltmp7:
.LBB4_39:
movq %rax, %r14
leaq 16(%rsp), %rdi
movl $_ZTTSt14basic_ifstreamIcSt11char_traitsIcEE, %esi
callq _ZNSt14basic_ifstreamIcSt11char_traitsIcEED2Ev
leaq 272(%rsp), %rdi
callq _ZNSt8ios_baseD2Ev
.LBB4_40:
testq %rbx, %rbx
je .LBB4_42
# %bb.41:
movq %rbx, %rdi
callq _ZdlPv
.LBB4_42: # %_ZNSt6vectorIfSaIfEED2Ev.exit21
movq %r14, %rdi
callq _Unwind_Resume@PLT
.Lfunc_end4:
.size main, .Lfunc_end4-main
.cfi_endproc
.section .gcc_except_table,"a",@progbits
.p2align 2, 0x0
GCC_except_table4:
.Lexception0:
.byte 255 # @LPStart Encoding = omit
.byte 255 # @TType Encoding = omit
.byte 1 # Call site Encoding = uleb128
.uleb128 .Lcst_end0-.Lcst_begin0
.Lcst_begin0:
.uleb128 .Ltmp0-.Lfunc_begin0 # >> Call Site 1 <<
.uleb128 .Ltmp1-.Ltmp0 # Call between .Ltmp0 and .Ltmp1
.uleb128 .Ltmp2-.Lfunc_begin0 # jumps to .Ltmp2
.byte 0 # On action: cleanup
.uleb128 .Ltmp3-.Lfunc_begin0 # >> Call Site 2 <<
.uleb128 .Ltmp6-.Ltmp3 # Call between .Ltmp3 and .Ltmp6
.uleb128 .Ltmp7-.Lfunc_begin0 # jumps to .Ltmp7
.byte 0 # On action: cleanup
.uleb128 .Ltmp6-.Lfunc_begin0 # >> Call Site 3 <<
.uleb128 .Ltmp8-.Ltmp6 # Call between .Ltmp6 and .Ltmp8
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.uleb128 .Ltmp8-.Lfunc_begin0 # >> Call Site 4 <<
.uleb128 .Ltmp11-.Ltmp8 # Call between .Ltmp8 and .Ltmp11
.uleb128 .Ltmp12-.Lfunc_begin0 # jumps to .Ltmp12
.byte 0 # On action: cleanup
.uleb128 .Ltmp13-.Lfunc_begin0 # >> Call Site 5 <<
.uleb128 .Ltmp14-.Ltmp13 # Call between .Ltmp13 and .Ltmp14
.uleb128 .Ltmp15-.Lfunc_begin0 # jumps to .Ltmp15
.byte 0 # On action: cleanup
.uleb128 .Ltmp16-.Lfunc_begin0 # >> Call Site 6 <<
.uleb128 .Ltmp17-.Ltmp16 # Call between .Ltmp16 and .Ltmp17
.uleb128 .Ltmp18-.Lfunc_begin0 # jumps to .Ltmp18
.byte 0 # On action: cleanup
.uleb128 .Ltmp17-.Lfunc_begin0 # >> Call Site 7 <<
.uleb128 .Lfunc_end4-.Ltmp17 # Call between .Ltmp17 and .Lfunc_end4
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.Lcst_end0:
.p2align 2, 0x0
# -- End function
.text
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB5_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB5_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10GridKernel9DataFrameS_Pj, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end5:
.size __hip_module_ctor, .Lfunc_end5-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB6_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB6_2:
retq
.Lfunc_end6:
.size __hip_module_dtor, .Lfunc_end6-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "CUDA malloc data DataFrame: %s\n"
.size .L.str, 32
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "CUDA malloc circles DataFrame: %s\n"
.size .L.str.1, 35
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "CUDA malloc counter variable: %s\n"
.size .L.str.2, 34
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "initialized counter : %u\n"
.size .L.str.3, 26
.type _Z10GridKernel9DataFrameS_Pj,@object # @_Z10GridKernel9DataFrameS_Pj
.section .rodata,"a",@progbits
.globl _Z10GridKernel9DataFrameS_Pj
.p2align 3, 0x0
_Z10GridKernel9DataFrameS_Pj:
.quad _Z25__device_stub__GridKernel9DataFrameS_Pj
.size _Z10GridKernel9DataFrameS_Pj, 8
.type .L.str.4,@object # @.str.4
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.4:
.asciz "Run kernel: %s\n"
.size .L.str.4, 16
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "Time: %3.5f ms\n"
.size .L.str.5, 16
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "Copy circles off of device: %s\n"
.size .L.str.6, 32
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "counter : %u\n"
.size .L.str.7, 14
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "%3.1f\t"
.size .L.str.8, 7
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "file.dat"
.size .L.str.10, 9
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "vector::_M_realloc_insert"
.size .L.str.11, 26
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z10GridKernel9DataFrameS_Pj"
.size .L__unnamed_1, 29
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__GridKernel9DataFrameS_Pj
.addrsig_sym __gxx_personality_v0
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Unwind_Resume
.addrsig_sym _Z10GridKernel9DataFrameS_Pj
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <unistd.h>
#include <stdio.h>
#include <iostream>
#include <cstdlib>
#include <errno.h>
#include <math.h>
#include <ctime>
#include <curand.h>
#include <curand_kernel.h>
__global__ void init(float time, curandState_t* states){
int threadID = threadIdx.x + blockDim.x * blockIdx.x;
curand_init ( time, threadID, 0, &states[threadID] );
}
__global__ void getRandNums(curandState *states, int* randNums) {
int threadID = threadIdx.x + blockDim.x * blockIdx.x;
float x = curand_uniform(&states[threadID]);
float y = curand_uniform(&states[threadID]);
float dx = abs(.5 - x);
float dy = abs(.5 - y);
float distance = sqrt ( dx * dx + dy * dy);
if (.5 > distance){
randNums[threadID] = 1;
}
else {
randNums[threadID] = 0;
}
}
int main(int argc, char* argv[]) {
//default number of blocks if not specified via command line
long int blocks = 256;
//number of threads per block, max is 1024 on an nvidia m40
int bThreads = 512;
long int arg1 = 0;
errno = 0;
char *endIn = NULL;
if ( argc >= 2){
arg1 = strtol(argv[1], &endIn, 10);
if (arg1 != 0 || errno == 0 ){
blocks = arg1;
}
} else {
std::cout << "Number of blocks not specified, using default 256" << std::endl;
}
//total threads is needed for size of the arrays and later for monte-carlo approximation
int tThreads = blocks * bThreads;
std::cout << "Blocks: " << blocks << " Total Threads: " << tThreads << std::endl;
curandState_t *states;
cudaMallocManaged(&states, tThreads * sizeof(curandState_t));
init<<<blocks, bThreads>>>(time(0), states);
int* randNums;
cudaMallocManaged(&randNums, tThreads * sizeof(long int));
getRandNums<<<blocks, bThreads>>>(states, randNums);
cudaDeviceSynchronize();
int insidePoints = 0;
for (int i = 0; i < tThreads; i++) {
if (randNums[i] == 1){
insidePoints++;
}
}
float pi = 4 * (static_cast<double>(insidePoints) / static_cast<double>(tThreads));
std::cout << "Pi is approx: " << pi << std::endl;
cudaFree(states);
cudaFree(randNums);
return 0;
} | .file "tmpxft_00071915_00000000-6_cudaPi.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3899:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3899:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z42__device_stub__Z4initfP17curandStateXORWOWfP17curandStateXORWOW
.type _Z42__device_stub__Z4initfP17curandStateXORWOWfP17curandStateXORWOW, @function
_Z42__device_stub__Z4initfP17curandStateXORWOWfP17curandStateXORWOW:
.LFB3921:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movss %xmm0, 12(%rsp)
movq %rdi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z4initfP17curandStateXORWOW(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3921:
.size _Z42__device_stub__Z4initfP17curandStateXORWOWfP17curandStateXORWOW, .-_Z42__device_stub__Z4initfP17curandStateXORWOWfP17curandStateXORWOW
.globl _Z4initfP17curandStateXORWOW
.type _Z4initfP17curandStateXORWOW, @function
_Z4initfP17curandStateXORWOW:
.LFB3922:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z42__device_stub__Z4initfP17curandStateXORWOWfP17curandStateXORWOW
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3922:
.size _Z4initfP17curandStateXORWOW, .-_Z4initfP17curandStateXORWOW
.globl _Z51__device_stub__Z11getRandNumsP17curandStateXORWOWPiP17curandStateXORWOWPi
.type _Z51__device_stub__Z11getRandNumsP17curandStateXORWOWPiP17curandStateXORWOWPi, @function
_Z51__device_stub__Z11getRandNumsP17curandStateXORWOWPiP17curandStateXORWOWPi:
.LFB3923:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z11getRandNumsP17curandStateXORWOWPi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3923:
.size _Z51__device_stub__Z11getRandNumsP17curandStateXORWOWPiP17curandStateXORWOWPi, .-_Z51__device_stub__Z11getRandNumsP17curandStateXORWOWPiP17curandStateXORWOWPi
.globl _Z11getRandNumsP17curandStateXORWOWPi
.type _Z11getRandNumsP17curandStateXORWOWPi, @function
_Z11getRandNumsP17curandStateXORWOWPi:
.LFB3924:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z51__device_stub__Z11getRandNumsP17curandStateXORWOWPiP17curandStateXORWOWPi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3924:
.size _Z11getRandNumsP17curandStateXORWOWPi, .-_Z11getRandNumsP17curandStateXORWOWPi
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "Number of blocks not specified, using default 256"
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "Blocks: "
.LC2:
.string " Total Threads: "
.LC4:
.string "Pi is approx: "
.text
.globl main
.type main, @function
main:
.LFB3896:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $88, %rsp
.cfi_def_cfa_offset 128
movl %edi, %ebx
movq %rsi, %r12
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
call __errno_location@PLT
movl $0, (%rax)
movq $0, 24(%rsp)
cmpl $1, %ebx
jle .L20
movq %rax, %rbp
leaq 24(%rsp), %rsi
movq 8(%r12), %rdi
movl $10, %edx
call __isoc23_strtol@PLT
movq %rax, %rbx
testq %rax, %rax
jne .L21
cmpl $0, 0(%rbp)
setne %bl
movzbl %bl, %ebx
salq $8, %rbx
jmp .L21
.L20:
leaq .LC0(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $256, %ebx
.L21:
movl %ebx, %r13d
movl %ebx, %ebp
sall $9, %ebp
leaq .LC1(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movq %rbx, %rsi
call _ZNSo9_M_insertIlEERSoT_@PLT
movq %rax, %rdi
leaq .LC2(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl %ebp, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movslq %ebp, %r12
leaq (%r12,%r12,2), %rsi
salq $4, %rsi
leaq 32(%rsp), %rdi
movl $1, %edx
call cudaMallocManaged@PLT
movl $512, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl %ebx, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L31
.L22:
leaq 0(,%r12,8), %rsi
leaq 40(%rsp), %rdi
movl $1, %edx
call cudaMallocManaged@PLT
movl $512, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl %r13d, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L32
.L23:
call cudaDeviceSynchronize@PLT
testl %ebp, %ebp
jle .L28
movq 40(%rsp), %rax
leal -1(%rbp), %edx
leaq 4(%rax,%rdx,4), %rsi
movl $0, %edx
.L26:
cmpl $1, (%rax)
sete %cl
movzbl %cl, %ecx
addl %ecx, %edx
addq $4, %rax
cmpq %rsi, %rax
jne .L26
.L24:
pxor %xmm0, %xmm0
cvtsi2sdl %edx, %xmm0
pxor %xmm1, %xmm1
cvtsi2sdl %ebp, %xmm1
divsd %xmm1, %xmm0
mulsd .LC3(%rip), %xmm0
pxor %xmm2, %xmm2
cvtsd2ss %xmm0, %xmm2
movss %xmm2, 12(%rsp)
leaq .LC4(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
pxor %xmm0, %xmm0
cvtss2sd 12(%rsp), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L33
movl $0, %eax
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
movq 32(%rsp), %rbx
movl $0, %edi
call time@PLT
pxor %xmm0, %xmm0
cvtsi2ssq %rax, %xmm0
movq %rbx, %rdi
call _Z42__device_stub__Z4initfP17curandStateXORWOWfP17curandStateXORWOW
jmp .L22
.L32:
movq 40(%rsp), %rsi
movq 32(%rsp), %rdi
call _Z51__device_stub__Z11getRandNumsP17curandStateXORWOWPiP17curandStateXORWOWPi
jmp .L23
.L28:
movl $0, %edx
jmp .L24
.L33:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3896:
.size main, .-main
.section .rodata.str1.8
.align 8
.LC5:
.string "_Z11getRandNumsP17curandStateXORWOWPi"
.section .rodata.str1.1
.LC6:
.string "_Z4initfP17curandStateXORWOW"
.LC7:
.string "precalc_xorwow_matrix"
.LC8:
.string "precalc_xorwow_offset_matrix"
.LC9:
.string "mrg32k3aM1"
.LC10:
.string "mrg32k3aM2"
.LC11:
.string "mrg32k3aM1SubSeq"
.LC12:
.string "mrg32k3aM2SubSeq"
.LC13:
.string "mrg32k3aM1Seq"
.LC14:
.string "mrg32k3aM2Seq"
.LC15:
.string "__cr_lgamma_table"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3926:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _Z11getRandNumsP17curandStateXORWOWPi(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _Z4initfP17curandStateXORWOW(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $102400, %r9d
movl $0, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq _ZL21precalc_xorwow_matrix(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $102400, %r9d
movl $0, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _ZL28precalc_xorwow_offset_matrix(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC9(%rip), %rdx
movq %rdx, %rcx
leaq _ZL10mrg32k3aM1(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _ZL10mrg32k3aM2(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2016, %r9d
movl $0, %r8d
leaq .LC11(%rip), %rdx
movq %rdx, %rcx
leaq _ZL16mrg32k3aM1SubSeq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2016, %r9d
movl $0, %r8d
leaq .LC12(%rip), %rdx
movq %rdx, %rcx
leaq _ZL16mrg32k3aM2SubSeq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC13(%rip), %rdx
movq %rdx, %rcx
leaq _ZL13mrg32k3aM1Seq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC14(%rip), %rdx
movq %rdx, %rcx
leaq _ZL13mrg32k3aM2Seq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $72, %r9d
movl $0, %r8d
leaq .LC15(%rip), %rdx
movq %rdx, %rcx
leaq _ZL17__cr_lgamma_table(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3926:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL17__cr_lgamma_table
.comm _ZL17__cr_lgamma_table,72,32
.local _ZL13mrg32k3aM2Seq
.comm _ZL13mrg32k3aM2Seq,2304,32
.local _ZL13mrg32k3aM1Seq
.comm _ZL13mrg32k3aM1Seq,2304,32
.local _ZL16mrg32k3aM2SubSeq
.comm _ZL16mrg32k3aM2SubSeq,2016,32
.local _ZL16mrg32k3aM1SubSeq
.comm _ZL16mrg32k3aM1SubSeq,2016,32
.local _ZL10mrg32k3aM2
.comm _ZL10mrg32k3aM2,2304,32
.local _ZL10mrg32k3aM1
.comm _ZL10mrg32k3aM1,2304,32
.local _ZL28precalc_xorwow_offset_matrix
.comm _ZL28precalc_xorwow_offset_matrix,102400,32
.local _ZL21precalc_xorwow_matrix
.comm _ZL21precalc_xorwow_matrix,102400,32
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC3:
.long 0
.long 1074790400
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <unistd.h>
#include <stdio.h>
#include <iostream>
#include <cstdlib>
#include <errno.h>
#include <math.h>
#include <ctime>
#include <curand.h>
#include <curand_kernel.h>
__global__ void init(float time, curandState_t* states){
int threadID = threadIdx.x + blockDim.x * blockIdx.x;
curand_init ( time, threadID, 0, &states[threadID] );
}
__global__ void getRandNums(curandState *states, int* randNums) {
int threadID = threadIdx.x + blockDim.x * blockIdx.x;
float x = curand_uniform(&states[threadID]);
float y = curand_uniform(&states[threadID]);
float dx = abs(.5 - x);
float dy = abs(.5 - y);
float distance = sqrt ( dx * dx + dy * dy);
if (.5 > distance){
randNums[threadID] = 1;
}
else {
randNums[threadID] = 0;
}
}
int main(int argc, char* argv[]) {
//default number of blocks if not specified via command line
long int blocks = 256;
//number of threads per block, max is 1024 on an nvidia m40
int bThreads = 512;
long int arg1 = 0;
errno = 0;
char *endIn = NULL;
if ( argc >= 2){
arg1 = strtol(argv[1], &endIn, 10);
if (arg1 != 0 || errno == 0 ){
blocks = arg1;
}
} else {
std::cout << "Number of blocks not specified, using default 256" << std::endl;
}
//total threads is needed for size of the arrays and later for monte-carlo approximation
int tThreads = blocks * bThreads;
std::cout << "Blocks: " << blocks << " Total Threads: " << tThreads << std::endl;
curandState_t *states;
cudaMallocManaged(&states, tThreads * sizeof(curandState_t));
init<<<blocks, bThreads>>>(time(0), states);
int* randNums;
cudaMallocManaged(&randNums, tThreads * sizeof(long int));
getRandNums<<<blocks, bThreads>>>(states, randNums);
cudaDeviceSynchronize();
int insidePoints = 0;
for (int i = 0; i < tThreads; i++) {
if (randNums[i] == 1){
insidePoints++;
}
}
float pi = 4 * (static_cast<double>(insidePoints) / static_cast<double>(tThreads));
std::cout << "Pi is approx: " << pi << std::endl;
cudaFree(states);
cudaFree(randNums);
return 0;
} | #include <hip/hip_runtime.h>
#include <unistd.h>
#include <stdio.h>
#include <iostream>
#include <cstdlib>
#include <errno.h>
#include <math.h>
#include <ctime>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
__global__ void init(float time, hiprandState_t* states){
int threadID = threadIdx.x + blockDim.x * blockIdx.x;
hiprand_init ( time, threadID, 0, &states[threadID] );
}
__global__ void getRandNums(hiprandState *states, int* randNums) {
int threadID = threadIdx.x + blockDim.x * blockIdx.x;
float x = hiprand_uniform(&states[threadID]);
float y = hiprand_uniform(&states[threadID]);
float dx = abs(.5 - x);
float dy = abs(.5 - y);
float distance = sqrt ( dx * dx + dy * dy);
if (.5 > distance){
randNums[threadID] = 1;
}
else {
randNums[threadID] = 0;
}
}
int main(int argc, char* argv[]) {
//default number of blocks if not specified via command line
long int blocks = 256;
//number of threads per block, max is 1024 on an nvidia m40
int bThreads = 512;
long int arg1 = 0;
errno = 0;
char *endIn = NULL;
if ( argc >= 2){
arg1 = strtol(argv[1], &endIn, 10);
if (arg1 != 0 || errno == 0 ){
blocks = arg1;
}
} else {
std::cout << "Number of blocks not specified, using default 256" << std::endl;
}
//total threads is needed for size of the arrays and later for monte-carlo approximation
int tThreads = blocks * bThreads;
std::cout << "Blocks: " << blocks << " Total Threads: " << tThreads << std::endl;
hiprandState_t *states;
hipMallocManaged(&states, tThreads * sizeof(hiprandState_t));
init<<<blocks, bThreads>>>(time(0), states);
int* randNums;
hipMallocManaged(&randNums, tThreads * sizeof(long int));
getRandNums<<<blocks, bThreads>>>(states, randNums);
hipDeviceSynchronize();
int insidePoints = 0;
for (int i = 0; i < tThreads; i++) {
if (randNums[i] == 1){
insidePoints++;
}
}
float pi = 4 * (static_cast<double>(insidePoints) / static_cast<double>(tThreads));
std::cout << "Pi is approx: " << pi << std::endl;
hipFree(states);
hipFree(randNums);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <unistd.h>
#include <stdio.h>
#include <iostream>
#include <cstdlib>
#include <errno.h>
#include <math.h>
#include <ctime>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
__global__ void init(float time, hiprandState_t* states){
int threadID = threadIdx.x + blockDim.x * blockIdx.x;
hiprand_init ( time, threadID, 0, &states[threadID] );
}
__global__ void getRandNums(hiprandState *states, int* randNums) {
int threadID = threadIdx.x + blockDim.x * blockIdx.x;
float x = hiprand_uniform(&states[threadID]);
float y = hiprand_uniform(&states[threadID]);
float dx = abs(.5 - x);
float dy = abs(.5 - y);
float distance = sqrt ( dx * dx + dy * dy);
if (.5 > distance){
randNums[threadID] = 1;
}
else {
randNums[threadID] = 0;
}
}
int main(int argc, char* argv[]) {
//default number of blocks if not specified via command line
long int blocks = 256;
//number of threads per block, max is 1024 on an nvidia m40
int bThreads = 512;
long int arg1 = 0;
errno = 0;
char *endIn = NULL;
if ( argc >= 2){
arg1 = strtol(argv[1], &endIn, 10);
if (arg1 != 0 || errno == 0 ){
blocks = arg1;
}
} else {
std::cout << "Number of blocks not specified, using default 256" << std::endl;
}
//total threads is needed for size of the arrays and later for monte-carlo approximation
int tThreads = blocks * bThreads;
std::cout << "Blocks: " << blocks << " Total Threads: " << tThreads << std::endl;
hiprandState_t *states;
hipMallocManaged(&states, tThreads * sizeof(hiprandState_t));
init<<<blocks, bThreads>>>(time(0), states);
int* randNums;
hipMallocManaged(&randNums, tThreads * sizeof(long int));
getRandNums<<<blocks, bThreads>>>(states, randNums);
hipDeviceSynchronize();
int insidePoints = 0;
for (int i = 0; i < tThreads; i++) {
if (randNums[i] == 1){
insidePoints++;
}
}
float pi = 4 * (static_cast<double>(insidePoints) / static_cast<double>(tThreads));
std::cout << "Pi is approx: " << pi << std::endl;
hipFree(states);
hipFree(randNums);
return 0;
} | .text
.file "cudaPi.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z19__device_stub__initfP12hiprandState # -- Begin function _Z19__device_stub__initfP12hiprandState
.p2align 4, 0x90
.type _Z19__device_stub__initfP12hiprandState,@function
_Z19__device_stub__initfP12hiprandState: # @_Z19__device_stub__initfP12hiprandState
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movss %xmm0, 4(%rsp)
movq %rdi, 56(%rsp)
leaq 4(%rsp), %rax
movq %rax, 64(%rsp)
leaq 56(%rsp), %rax
movq %rax, 72(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z4initfP12hiprandState, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z19__device_stub__initfP12hiprandState, .Lfunc_end0-_Z19__device_stub__initfP12hiprandState
.cfi_endproc
# -- End function
.globl _Z26__device_stub__getRandNumsP12hiprandStatePi # -- Begin function _Z26__device_stub__getRandNumsP12hiprandStatePi
.p2align 4, 0x90
.type _Z26__device_stub__getRandNumsP12hiprandStatePi,@function
_Z26__device_stub__getRandNumsP12hiprandStatePi: # @_Z26__device_stub__getRandNumsP12hiprandStatePi
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z11getRandNumsP12hiprandStatePi, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end1:
.size _Z26__device_stub__getRandNumsP12hiprandStatePi, .Lfunc_end1-_Z26__device_stub__getRandNumsP12hiprandStatePi
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI2_0:
.quad 0x4010000000000000 # double 4
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $128, %rsp
.cfi_def_cfa_offset 176
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %rbx
movl %edi, %ebp
callq __errno_location
movl $0, (%rax)
movq $0, 120(%rsp)
cmpl $2, %ebp
jl .LBB2_3
# %bb.1:
movq %rax, %r14
movq 8(%rbx), %rdi
leaq 120(%rsp), %rsi
movl $10, %edx
callq __isoc23_strtol
testq %rax, %rax
je .LBB2_6
.LBB2_2:
movq %rax, %r15
jmp .LBB2_10
.LBB2_3:
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $49, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %rbx
testq %rbx, %rbx
je .LBB2_28
# %bb.4: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%rbx)
je .LBB2_8
# %bb.5:
movzbl 67(%rbx), %eax
jmp .LBB2_9
.LBB2_6:
movl $256, %r15d # imm = 0x100
cmpl $0, (%r14)
jne .LBB2_10
jmp .LBB2_2
.LBB2_8:
movq %rbx, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
.LBB2_9: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $256, %r15d # imm = 0x100
.LBB2_10:
movl %r15d, %ebx
shll $9, %ebx
movl $_ZSt4cout, %edi
movl $.L.str.1, %esi
movl $8, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movq %r15, %rsi
callq _ZNSo9_M_insertIlEERSoT_
movq %rax, %r14
movl $.L.str.2, %esi
movl $16, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq %r14, %rdi
movl %ebx, %esi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r14
testq %r14, %r14
je .LBB2_28
# %bb.11: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i39
cmpb $0, 56(%r14)
je .LBB2_13
# %bb.12:
movzbl 67(%r14), %ecx
jmp .LBB2_14
.LBB2_13:
movq %r14, %rdi
movq %rax, %r12
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r12, %rax
.LBB2_14: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit42
movabsq $4294967808, %r14 # imm = 0x100000200
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movslq %ebx, %r12
movq %r12, %rax
shlq $4, %rax
leaq (%rax,%rax,2), %rsi
leaq 16(%rsp), %rdi
movl $1, %edx
callq hipMallocManaged
movl %r15d, %eax
leaq (%rax,%r14), %r15
addq $-512, %r15 # imm = 0xFE00
movq %r15, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_16
# %bb.15:
xorl %edi, %edi
callq time
cvtsi2ss %rax, %xmm0
movq 16(%rsp), %rax
movss %xmm0, 24(%rsp)
movq %rax, 80(%rsp)
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 72(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z4initfP12hiprandState, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 80(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_16:
shlq $3, %r12
leaq 8(%rsp), %rdi
movq %r12, %rsi
movl $1, %edx
callq hipMallocManaged
movq %r15, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_18
# %bb.17:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq %rax, 80(%rsp)
movq %rcx, 72(%rsp)
leaq 80(%rsp), %rax
movq %rax, 96(%rsp)
leaq 72(%rsp), %rax
movq %rax, 104(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z11getRandNumsP12hiprandStatePi, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_18:
callq hipDeviceSynchronize
testl %ebx, %ebx
jle .LBB2_22
# %bb.19: # %.lr.ph
movq 8(%rsp), %rax
movl %ebx, %ecx
xorl %edx, %edx
xorl %esi, %esi
.p2align 4, 0x90
.LBB2_20: # =>This Inner Loop Header: Depth=1
xorl %edi, %edi
cmpl $1, (%rax,%rdx,4)
sete %dil
addl %edi, %esi
incq %rdx
cmpq %rdx, %rcx
jne .LBB2_20
# %bb.21: # %._crit_edge.loopexit
xorps %xmm0, %xmm0
cvtsi2sd %esi, %xmm0
jmp .LBB2_23
.LBB2_22:
xorps %xmm0, %xmm0
.LBB2_23: # %._crit_edge
cvtsi2sd %ebx, %xmm1
divsd %xmm1, %xmm0
mulsd .LCPI2_0(%rip), %xmm0
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 92(%rsp) # 4-byte Spill
movl $_ZSt4cout, %edi
movl $.L.str.3, %esi
movl $14, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movss 92(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB2_28
# %bb.24: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i44
cmpb $0, 56(%rbx)
je .LBB2_26
# %bb.25:
movzbl 67(%rbx), %ecx
jmp .LBB2_27
.LBB2_26:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB2_27: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit47
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $128, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB2_28:
.cfi_def_cfa_offset 176
callq _ZSt16__throw_bad_castv
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4initfP12hiprandState, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11getRandNumsP12hiprandStatePi, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z4initfP12hiprandState,@object # @_Z4initfP12hiprandState
.section .rodata,"a",@progbits
.globl _Z4initfP12hiprandState
.p2align 3, 0x0
_Z4initfP12hiprandState:
.quad _Z19__device_stub__initfP12hiprandState
.size _Z4initfP12hiprandState, 8
.type _Z11getRandNumsP12hiprandStatePi,@object # @_Z11getRandNumsP12hiprandStatePi
.globl _Z11getRandNumsP12hiprandStatePi
.p2align 3, 0x0
_Z11getRandNumsP12hiprandStatePi:
.quad _Z26__device_stub__getRandNumsP12hiprandStatePi
.size _Z11getRandNumsP12hiprandStatePi, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Number of blocks not specified, using default 256"
.size .L.str, 50
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Blocks: "
.size .L.str.1, 9
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz " Total Threads: "
.size .L.str.2, 17
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Pi is approx: "
.size .L.str.3, 15
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z4initfP12hiprandState"
.size .L__unnamed_1, 24
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z11getRandNumsP12hiprandStatePi"
.size .L__unnamed_2, 33
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z19__device_stub__initfP12hiprandState
.addrsig_sym _Z26__device_stub__getRandNumsP12hiprandStatePi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z4initfP12hiprandState
.addrsig_sym _Z11getRandNumsP12hiprandStatePi
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00071915_00000000-6_cudaPi.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3899:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3899:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z42__device_stub__Z4initfP17curandStateXORWOWfP17curandStateXORWOW
.type _Z42__device_stub__Z4initfP17curandStateXORWOWfP17curandStateXORWOW, @function
_Z42__device_stub__Z4initfP17curandStateXORWOWfP17curandStateXORWOW:
.LFB3921:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movss %xmm0, 12(%rsp)
movq %rdi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z4initfP17curandStateXORWOW(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3921:
.size _Z42__device_stub__Z4initfP17curandStateXORWOWfP17curandStateXORWOW, .-_Z42__device_stub__Z4initfP17curandStateXORWOWfP17curandStateXORWOW
.globl _Z4initfP17curandStateXORWOW
.type _Z4initfP17curandStateXORWOW, @function
_Z4initfP17curandStateXORWOW:
.LFB3922:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z42__device_stub__Z4initfP17curandStateXORWOWfP17curandStateXORWOW
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3922:
.size _Z4initfP17curandStateXORWOW, .-_Z4initfP17curandStateXORWOW
.globl _Z51__device_stub__Z11getRandNumsP17curandStateXORWOWPiP17curandStateXORWOWPi
.type _Z51__device_stub__Z11getRandNumsP17curandStateXORWOWPiP17curandStateXORWOWPi, @function
_Z51__device_stub__Z11getRandNumsP17curandStateXORWOWPiP17curandStateXORWOWPi:
.LFB3923:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z11getRandNumsP17curandStateXORWOWPi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3923:
.size _Z51__device_stub__Z11getRandNumsP17curandStateXORWOWPiP17curandStateXORWOWPi, .-_Z51__device_stub__Z11getRandNumsP17curandStateXORWOWPiP17curandStateXORWOWPi
.globl _Z11getRandNumsP17curandStateXORWOWPi
.type _Z11getRandNumsP17curandStateXORWOWPi, @function
_Z11getRandNumsP17curandStateXORWOWPi:
.LFB3924:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z51__device_stub__Z11getRandNumsP17curandStateXORWOWPiP17curandStateXORWOWPi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3924:
.size _Z11getRandNumsP17curandStateXORWOWPi, .-_Z11getRandNumsP17curandStateXORWOWPi
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "Number of blocks not specified, using default 256"
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "Blocks: "
.LC2:
.string " Total Threads: "
.LC4:
.string "Pi is approx: "
.text
.globl main
.type main, @function
main:
.LFB3896:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $88, %rsp
.cfi_def_cfa_offset 128
movl %edi, %ebx
movq %rsi, %r12
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
call __errno_location@PLT
movl $0, (%rax)
movq $0, 24(%rsp)
cmpl $1, %ebx
jle .L20
movq %rax, %rbp
leaq 24(%rsp), %rsi
movq 8(%r12), %rdi
movl $10, %edx
call __isoc23_strtol@PLT
movq %rax, %rbx
testq %rax, %rax
jne .L21
cmpl $0, 0(%rbp)
setne %bl
movzbl %bl, %ebx
salq $8, %rbx
jmp .L21
.L20:
leaq .LC0(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $256, %ebx
.L21:
movl %ebx, %r13d
movl %ebx, %ebp
sall $9, %ebp
leaq .LC1(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movq %rbx, %rsi
call _ZNSo9_M_insertIlEERSoT_@PLT
movq %rax, %rdi
leaq .LC2(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl %ebp, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movslq %ebp, %r12
leaq (%r12,%r12,2), %rsi
salq $4, %rsi
leaq 32(%rsp), %rdi
movl $1, %edx
call cudaMallocManaged@PLT
movl $512, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl %ebx, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L31
.L22:
leaq 0(,%r12,8), %rsi
leaq 40(%rsp), %rdi
movl $1, %edx
call cudaMallocManaged@PLT
movl $512, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl %r13d, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L32
.L23:
call cudaDeviceSynchronize@PLT
testl %ebp, %ebp
jle .L28
movq 40(%rsp), %rax
leal -1(%rbp), %edx
leaq 4(%rax,%rdx,4), %rsi
movl $0, %edx
.L26:
cmpl $1, (%rax)
sete %cl
movzbl %cl, %ecx
addl %ecx, %edx
addq $4, %rax
cmpq %rsi, %rax
jne .L26
.L24:
pxor %xmm0, %xmm0
cvtsi2sdl %edx, %xmm0
pxor %xmm1, %xmm1
cvtsi2sdl %ebp, %xmm1
divsd %xmm1, %xmm0
mulsd .LC3(%rip), %xmm0
pxor %xmm2, %xmm2
cvtsd2ss %xmm0, %xmm2
movss %xmm2, 12(%rsp)
leaq .LC4(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
pxor %xmm0, %xmm0
cvtss2sd 12(%rsp), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L33
movl $0, %eax
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
movq 32(%rsp), %rbx
movl $0, %edi
call time@PLT
pxor %xmm0, %xmm0
cvtsi2ssq %rax, %xmm0
movq %rbx, %rdi
call _Z42__device_stub__Z4initfP17curandStateXORWOWfP17curandStateXORWOW
jmp .L22
.L32:
movq 40(%rsp), %rsi
movq 32(%rsp), %rdi
call _Z51__device_stub__Z11getRandNumsP17curandStateXORWOWPiP17curandStateXORWOWPi
jmp .L23
.L28:
movl $0, %edx
jmp .L24
.L33:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3896:
.size main, .-main
.section .rodata.str1.8
.align 8
.LC5:
.string "_Z11getRandNumsP17curandStateXORWOWPi"
.section .rodata.str1.1
.LC6:
.string "_Z4initfP17curandStateXORWOW"
.LC7:
.string "precalc_xorwow_matrix"
.LC8:
.string "precalc_xorwow_offset_matrix"
.LC9:
.string "mrg32k3aM1"
.LC10:
.string "mrg32k3aM2"
.LC11:
.string "mrg32k3aM1SubSeq"
.LC12:
.string "mrg32k3aM2SubSeq"
.LC13:
.string "mrg32k3aM1Seq"
.LC14:
.string "mrg32k3aM2Seq"
.LC15:
.string "__cr_lgamma_table"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3926:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _Z11getRandNumsP17curandStateXORWOWPi(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _Z4initfP17curandStateXORWOW(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $102400, %r9d
movl $0, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq _ZL21precalc_xorwow_matrix(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $102400, %r9d
movl $0, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _ZL28precalc_xorwow_offset_matrix(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC9(%rip), %rdx
movq %rdx, %rcx
leaq _ZL10mrg32k3aM1(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _ZL10mrg32k3aM2(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2016, %r9d
movl $0, %r8d
leaq .LC11(%rip), %rdx
movq %rdx, %rcx
leaq _ZL16mrg32k3aM1SubSeq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2016, %r9d
movl $0, %r8d
leaq .LC12(%rip), %rdx
movq %rdx, %rcx
leaq _ZL16mrg32k3aM2SubSeq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC13(%rip), %rdx
movq %rdx, %rcx
leaq _ZL13mrg32k3aM1Seq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC14(%rip), %rdx
movq %rdx, %rcx
leaq _ZL13mrg32k3aM2Seq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $72, %r9d
movl $0, %r8d
leaq .LC15(%rip), %rdx
movq %rdx, %rcx
leaq _ZL17__cr_lgamma_table(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3926:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL17__cr_lgamma_table
.comm _ZL17__cr_lgamma_table,72,32
.local _ZL13mrg32k3aM2Seq
.comm _ZL13mrg32k3aM2Seq,2304,32
.local _ZL13mrg32k3aM1Seq
.comm _ZL13mrg32k3aM1Seq,2304,32
.local _ZL16mrg32k3aM2SubSeq
.comm _ZL16mrg32k3aM2SubSeq,2016,32
.local _ZL16mrg32k3aM1SubSeq
.comm _ZL16mrg32k3aM1SubSeq,2016,32
.local _ZL10mrg32k3aM2
.comm _ZL10mrg32k3aM2,2304,32
.local _ZL10mrg32k3aM1
.comm _ZL10mrg32k3aM1,2304,32
.local _ZL28precalc_xorwow_offset_matrix
.comm _ZL28precalc_xorwow_offset_matrix,102400,32
.local _ZL21precalc_xorwow_matrix
.comm _ZL21precalc_xorwow_matrix,102400,32
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC3:
.long 0
.long 1074790400
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "cudaPi.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z19__device_stub__initfP12hiprandState # -- Begin function _Z19__device_stub__initfP12hiprandState
.p2align 4, 0x90
.type _Z19__device_stub__initfP12hiprandState,@function
_Z19__device_stub__initfP12hiprandState: # @_Z19__device_stub__initfP12hiprandState
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movss %xmm0, 4(%rsp)
movq %rdi, 56(%rsp)
leaq 4(%rsp), %rax
movq %rax, 64(%rsp)
leaq 56(%rsp), %rax
movq %rax, 72(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z4initfP12hiprandState, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z19__device_stub__initfP12hiprandState, .Lfunc_end0-_Z19__device_stub__initfP12hiprandState
.cfi_endproc
# -- End function
.globl _Z26__device_stub__getRandNumsP12hiprandStatePi # -- Begin function _Z26__device_stub__getRandNumsP12hiprandStatePi
.p2align 4, 0x90
.type _Z26__device_stub__getRandNumsP12hiprandStatePi,@function
_Z26__device_stub__getRandNumsP12hiprandStatePi: # @_Z26__device_stub__getRandNumsP12hiprandStatePi
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z11getRandNumsP12hiprandStatePi, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end1:
.size _Z26__device_stub__getRandNumsP12hiprandStatePi, .Lfunc_end1-_Z26__device_stub__getRandNumsP12hiprandStatePi
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI2_0:
.quad 0x4010000000000000 # double 4
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $128, %rsp
.cfi_def_cfa_offset 176
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %rbx
movl %edi, %ebp
callq __errno_location
movl $0, (%rax)
movq $0, 120(%rsp)
cmpl $2, %ebp
jl .LBB2_3
# %bb.1:
movq %rax, %r14
movq 8(%rbx), %rdi
leaq 120(%rsp), %rsi
movl $10, %edx
callq __isoc23_strtol
testq %rax, %rax
je .LBB2_6
.LBB2_2:
movq %rax, %r15
jmp .LBB2_10
.LBB2_3:
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $49, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %rbx
testq %rbx, %rbx
je .LBB2_28
# %bb.4: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%rbx)
je .LBB2_8
# %bb.5:
movzbl 67(%rbx), %eax
jmp .LBB2_9
.LBB2_6:
movl $256, %r15d # imm = 0x100
cmpl $0, (%r14)
jne .LBB2_10
jmp .LBB2_2
.LBB2_8:
movq %rbx, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
.LBB2_9: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $256, %r15d # imm = 0x100
.LBB2_10:
movl %r15d, %ebx
shll $9, %ebx
movl $_ZSt4cout, %edi
movl $.L.str.1, %esi
movl $8, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movq %r15, %rsi
callq _ZNSo9_M_insertIlEERSoT_
movq %rax, %r14
movl $.L.str.2, %esi
movl $16, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq %r14, %rdi
movl %ebx, %esi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r14
testq %r14, %r14
je .LBB2_28
# %bb.11: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i39
cmpb $0, 56(%r14)
je .LBB2_13
# %bb.12:
movzbl 67(%r14), %ecx
jmp .LBB2_14
.LBB2_13:
movq %r14, %rdi
movq %rax, %r12
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r12, %rax
.LBB2_14: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit42
movabsq $4294967808, %r14 # imm = 0x100000200
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movslq %ebx, %r12
movq %r12, %rax
shlq $4, %rax
leaq (%rax,%rax,2), %rsi
leaq 16(%rsp), %rdi
movl $1, %edx
callq hipMallocManaged
movl %r15d, %eax
leaq (%rax,%r14), %r15
addq $-512, %r15 # imm = 0xFE00
movq %r15, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_16
# %bb.15:
xorl %edi, %edi
callq time
cvtsi2ss %rax, %xmm0
movq 16(%rsp), %rax
movss %xmm0, 24(%rsp)
movq %rax, 80(%rsp)
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 72(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z4initfP12hiprandState, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 80(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_16:
shlq $3, %r12
leaq 8(%rsp), %rdi
movq %r12, %rsi
movl $1, %edx
callq hipMallocManaged
movq %r15, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_18
# %bb.17:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq %rax, 80(%rsp)
movq %rcx, 72(%rsp)
leaq 80(%rsp), %rax
movq %rax, 96(%rsp)
leaq 72(%rsp), %rax
movq %rax, 104(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z11getRandNumsP12hiprandStatePi, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_18:
callq hipDeviceSynchronize
testl %ebx, %ebx
jle .LBB2_22
# %bb.19: # %.lr.ph
movq 8(%rsp), %rax
movl %ebx, %ecx
xorl %edx, %edx
xorl %esi, %esi
.p2align 4, 0x90
.LBB2_20: # =>This Inner Loop Header: Depth=1
xorl %edi, %edi
cmpl $1, (%rax,%rdx,4)
sete %dil
addl %edi, %esi
incq %rdx
cmpq %rdx, %rcx
jne .LBB2_20
# %bb.21: # %._crit_edge.loopexit
xorps %xmm0, %xmm0
cvtsi2sd %esi, %xmm0
jmp .LBB2_23
.LBB2_22:
xorps %xmm0, %xmm0
.LBB2_23: # %._crit_edge
cvtsi2sd %ebx, %xmm1
divsd %xmm1, %xmm0
mulsd .LCPI2_0(%rip), %xmm0
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 92(%rsp) # 4-byte Spill
movl $_ZSt4cout, %edi
movl $.L.str.3, %esi
movl $14, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movss 92(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB2_28
# %bb.24: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i44
cmpb $0, 56(%rbx)
je .LBB2_26
# %bb.25:
movzbl 67(%rbx), %ecx
jmp .LBB2_27
.LBB2_26:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB2_27: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit47
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $128, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB2_28:
.cfi_def_cfa_offset 176
callq _ZSt16__throw_bad_castv
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4initfP12hiprandState, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11getRandNumsP12hiprandStatePi, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z4initfP12hiprandState,@object # @_Z4initfP12hiprandState
.section .rodata,"a",@progbits
.globl _Z4initfP12hiprandState
.p2align 3, 0x0
_Z4initfP12hiprandState:
.quad _Z19__device_stub__initfP12hiprandState
.size _Z4initfP12hiprandState, 8
.type _Z11getRandNumsP12hiprandStatePi,@object # @_Z11getRandNumsP12hiprandStatePi
.globl _Z11getRandNumsP12hiprandStatePi
.p2align 3, 0x0
_Z11getRandNumsP12hiprandStatePi:
.quad _Z26__device_stub__getRandNumsP12hiprandStatePi
.size _Z11getRandNumsP12hiprandStatePi, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Number of blocks not specified, using default 256"
.size .L.str, 50
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Blocks: "
.size .L.str.1, 9
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz " Total Threads: "
.size .L.str.2, 17
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Pi is approx: "
.size .L.str.3, 15
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z4initfP12hiprandState"
.size .L__unnamed_1, 24
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z11getRandNumsP12hiprandStatePi"
.size .L__unnamed_2, 33
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z19__device_stub__initfP12hiprandState
.addrsig_sym _Z26__device_stub__getRandNumsP12hiprandStatePi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z4initfP12hiprandState
.addrsig_sym _Z11getRandNumsP12hiprandStatePi
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include <cuda.h>
__global__
void MyKernel()
{
printf("blockIdx.x=%u,ThreadIdx.x=%u\n",blockIdx.x,threadIdx.x);
return;
}
int main()
{
printf("Kernel (Blocks x Threads)\n");
MyKernel<<<1, 2>>>();
printf("\n\n****Kernel (1x2) launched****\n\n");
cudaDeviceSynchronize();
printf("\n****Kernel finished****\n\n");
MyKernel<<<2, 1>>>();
printf("\n\n****Kernel (2x1) launched****\n\n");
cudaDeviceSynchronize();
printf("\n****Kernel finished****\n\n");
MyKernel<<<2, 2>>>();
printf("\n\n****Kernel (2x2) launched****\n\n");
cudaDeviceSynchronize();
printf("\n****Kernel finished****\n\n");
return 0;
} | code for sm_80
Function : _Z8MyKernelv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fc800078e00ff */
/*0010*/ S2R R9, SR_TID.X ; /* 0x0000000000097919 */
/* 0x000e220000002100 */
/*0020*/ IADD3 R1, R1, -0x8, RZ ; /* 0xfffffff801017810 */
/* 0x000fe20007ffe0ff */
/*0030*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff047624 */
/* 0x000fe200078e00ff */
/*0040*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x000fe20000000f00 */
/*0050*/ S2R R8, SR_CTAID.X ; /* 0x0000000000087919 */
/* 0x000e220000002500 */
/*0060*/ IADD3 R6, P0, R1, c[0x0][0x20], RZ ; /* 0x0000080001067a10 */
/* 0x000fe20007f1e0ff */
/*0070*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0xc] ; /* 0x01000300ff057624 */
/* 0x000fe200078e00ff */
/*0080*/ LDC.64 R2, c[0x4][R0] ; /* 0x0100000000027b82 */
/* 0x0002a60000000a00 */
/*0090*/ IMAD.X R7, RZ, RZ, c[0x0][0x24], P0 ; /* 0x00000900ff077624 */
/* 0x000fe200000e06ff */
/*00a0*/ STL.64 [R1], R8 ; /* 0x0000000801007387 */
/* 0x0013e80000100a00 */
/*00b0*/ LEPC R8 ; /* 0x000000000008734e */
/* 0x002fc60000000000 */
/*00c0*/ MOV R11, 0x130 ; /* 0x00000130000b7802 */
/* 0x000fe40000000f00 */
/*00d0*/ MOV R20, 0xb0 ; /* 0x000000b000147802 */
/* 0x000fc40000000f00 */
/*00e0*/ MOV R21, 0x0 ; /* 0x0000000000157802 */
/* 0x000fe40000000f00 */
/*00f0*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x000fe40000000f00 */
/*0100*/ IADD3 R20, P0, P1, -R20, R11, R8 ; /* 0x0000000b14147210 */
/* 0x000fc8000791e108 */
/*0110*/ IADD3.X R21, ~R0, R21, R9, P0, P1 ; /* 0x0000001500157210 */
/* 0x000fc800007e2509 */
/*0120*/ CALL.ABS.NOINC R2 ; /* 0x0000000002007343 */
/* 0x004fea0003c00000 */
/*0130*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0140*/ BRA 0x140; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <cuda.h>
__global__
void MyKernel()
{
printf("blockIdx.x=%u,ThreadIdx.x=%u\n",blockIdx.x,threadIdx.x);
return;
}
int main()
{
printf("Kernel (Blocks x Threads)\n");
MyKernel<<<1, 2>>>();
printf("\n\n****Kernel (1x2) launched****\n\n");
cudaDeviceSynchronize();
printf("\n****Kernel finished****\n\n");
MyKernel<<<2, 1>>>();
printf("\n\n****Kernel (2x1) launched****\n\n");
cudaDeviceSynchronize();
printf("\n****Kernel finished****\n\n");
MyKernel<<<2, 2>>>();
printf("\n\n****Kernel (2x2) launched****\n\n");
cudaDeviceSynchronize();
printf("\n****Kernel finished****\n\n");
return 0;
} | .file "tmpxft_000be753_00000000-6_first.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z26__device_stub__Z8MyKernelvv
.type _Z26__device_stub__Z8MyKernelvv, @function
_Z26__device_stub__Z8MyKernelvv:
.LFB2082:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z8MyKernelv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z26__device_stub__Z8MyKernelvv, .-_Z26__device_stub__Z8MyKernelvv
.globl _Z8MyKernelv
.type _Z8MyKernelv, @function
_Z8MyKernelv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z26__device_stub__Z8MyKernelvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z8MyKernelv, .-_Z8MyKernelv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Kernel (Blocks x Threads)\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC1:
.string "\n\n****Kernel (1x2) launched****\n\n"
.section .rodata.str1.1
.LC2:
.string "\n****Kernel finished****\n\n"
.section .rodata.str1.8
.align 8
.LC3:
.string "\n\n****Kernel (2x1) launched****\n\n"
.align 8
.LC4:
.string "\n\n****Kernel (2x2) launched****\n\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $2, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 8(%rsp)
movl $1, 12(%rsp)
movl $1, 16(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L16
.L12:
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
call cudaDeviceSynchronize@PLT
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $2, 8(%rsp)
movl $1, 12(%rsp)
movl $1, 16(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L17
.L13:
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
call cudaDeviceSynchronize@PLT
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $2, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $2, 8(%rsp)
movl $1, 12(%rsp)
movl $1, 16(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L18
.L14:
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
call cudaDeviceSynchronize@PLT
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %eax
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L16:
.cfi_restore_state
call _Z26__device_stub__Z8MyKernelvv
jmp .L12
.L17:
call _Z26__device_stub__Z8MyKernelvv
jmp .L13
.L18:
call _Z26__device_stub__Z8MyKernelvv
jmp .L14
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC5:
.string "_Z8MyKernelv"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _Z8MyKernelv(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <cuda.h>
__global__
void MyKernel()
{
printf("blockIdx.x=%u,ThreadIdx.x=%u\n",blockIdx.x,threadIdx.x);
return;
}
int main()
{
printf("Kernel (Blocks x Threads)\n");
MyKernel<<<1, 2>>>();
printf("\n\n****Kernel (1x2) launched****\n\n");
cudaDeviceSynchronize();
printf("\n****Kernel finished****\n\n");
MyKernel<<<2, 1>>>();
printf("\n\n****Kernel (2x1) launched****\n\n");
cudaDeviceSynchronize();
printf("\n****Kernel finished****\n\n");
MyKernel<<<2, 2>>>();
printf("\n\n****Kernel (2x2) launched****\n\n");
cudaDeviceSynchronize();
printf("\n****Kernel finished****\n\n");
return 0;
} | #include <stdio.h>
#include <hip/hip_runtime.h>
__global__
void MyKernel()
{
printf("blockIdx.x=%u,ThreadIdx.x=%u\n",blockIdx.x,threadIdx.x);
return;
}
int main()
{
printf("Kernel (Blocks x Threads)\n");
MyKernel<<<1, 2>>>();
printf("\n\n****Kernel (1x2) launched****\n\n");
hipDeviceSynchronize();
printf("\n****Kernel finished****\n\n");
MyKernel<<<2, 1>>>();
printf("\n\n****Kernel (2x1) launched****\n\n");
hipDeviceSynchronize();
printf("\n****Kernel finished****\n\n");
MyKernel<<<2, 2>>>();
printf("\n\n****Kernel (2x2) launched****\n\n");
hipDeviceSynchronize();
printf("\n****Kernel finished****\n\n");
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <stdio.h>
#include <hip/hip_runtime.h>
__global__
void MyKernel()
{
printf("blockIdx.x=%u,ThreadIdx.x=%u\n",blockIdx.x,threadIdx.x);
return;
}
int main()
{
printf("Kernel (Blocks x Threads)\n");
MyKernel<<<1, 2>>>();
printf("\n\n****Kernel (1x2) launched****\n\n");
hipDeviceSynchronize();
printf("\n****Kernel finished****\n\n");
MyKernel<<<2, 1>>>();
printf("\n\n****Kernel (2x1) launched****\n\n");
hipDeviceSynchronize();
printf("\n****Kernel finished****\n\n");
MyKernel<<<2, 2>>>();
printf("\n\n****Kernel (2x2) launched****\n\n");
hipDeviceSynchronize();
printf("\n****Kernel finished****\n\n");
return 0;
} | .text
.file "first.hip"
.globl _Z23__device_stub__MyKernelv # -- Begin function _Z23__device_stub__MyKernelv
.p2align 4, 0x90
.type _Z23__device_stub__MyKernelv,@function
_Z23__device_stub__MyKernelv: # @_Z23__device_stub__MyKernelv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z8MyKernelv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end0:
.size _Z23__device_stub__MyKernelv, .Lfunc_end0-_Z23__device_stub__MyKernelv
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $56, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movabsq $4294967298, %rbx # imm = 0x100000002
movl $.Lstr, %edi
callq puts@PLT
leaq -1(%rbx), %r14
movq %r14, %rdi
movl $1, %esi
movq %rbx, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z8MyKernelv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
movl $.Lstr.1, %edi
callq puts@PLT
callq hipDeviceSynchronize
movl $.Lstr.6, %edi
callq puts@PLT
movq %rbx, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z8MyKernelv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
movl $.Lstr.3, %edi
callq puts@PLT
callq hipDeviceSynchronize
movl $.Lstr.6, %edi
callq puts@PLT
movq %rbx, %rdi
movl $1, %esi
movq %rbx, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_6
# %bb.5:
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z8MyKernelv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_6:
movl $.Lstr.5, %edi
callq puts@PLT
callq hipDeviceSynchronize
movl $.Lstr.6, %edi
callq puts@PLT
xorl %eax, %eax
addq $56, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8MyKernelv, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z8MyKernelv,@object # @_Z8MyKernelv
.section .rodata,"a",@progbits
.globl _Z8MyKernelv
.p2align 3, 0x0
_Z8MyKernelv:
.quad _Z23__device_stub__MyKernelv
.size _Z8MyKernelv, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z8MyKernelv"
.size .L__unnamed_1, 13
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Kernel (Blocks x Threads)"
.size .Lstr, 26
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "\n\n****Kernel (1x2) launched****\n"
.size .Lstr.1, 33
.type .Lstr.3,@object # @str.3
.Lstr.3:
.asciz "\n\n****Kernel (2x1) launched****\n"
.size .Lstr.3, 33
.type .Lstr.5,@object # @str.5
.Lstr.5:
.asciz "\n\n****Kernel (2x2) launched****\n"
.size .Lstr.5, 33
.type .Lstr.6,@object # @str.6
.Lstr.6:
.asciz "\n****Kernel finished****\n"
.size .Lstr.6, 26
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z23__device_stub__MyKernelv
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z8MyKernelv
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000be753_00000000-6_first.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z26__device_stub__Z8MyKernelvv
.type _Z26__device_stub__Z8MyKernelvv, @function
_Z26__device_stub__Z8MyKernelvv:
.LFB2082:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z8MyKernelv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z26__device_stub__Z8MyKernelvv, .-_Z26__device_stub__Z8MyKernelvv
.globl _Z8MyKernelv
.type _Z8MyKernelv, @function
_Z8MyKernelv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z26__device_stub__Z8MyKernelvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z8MyKernelv, .-_Z8MyKernelv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Kernel (Blocks x Threads)\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC1:
.string "\n\n****Kernel (1x2) launched****\n\n"
.section .rodata.str1.1
.LC2:
.string "\n****Kernel finished****\n\n"
.section .rodata.str1.8
.align 8
.LC3:
.string "\n\n****Kernel (2x1) launched****\n\n"
.align 8
.LC4:
.string "\n\n****Kernel (2x2) launched****\n\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $2, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 8(%rsp)
movl $1, 12(%rsp)
movl $1, 16(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L16
.L12:
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
call cudaDeviceSynchronize@PLT
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $2, 8(%rsp)
movl $1, 12(%rsp)
movl $1, 16(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L17
.L13:
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
call cudaDeviceSynchronize@PLT
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $2, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $2, 8(%rsp)
movl $1, 12(%rsp)
movl $1, 16(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L18
.L14:
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
call cudaDeviceSynchronize@PLT
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %eax
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L16:
.cfi_restore_state
call _Z26__device_stub__Z8MyKernelvv
jmp .L12
.L17:
call _Z26__device_stub__Z8MyKernelvv
jmp .L13
.L18:
call _Z26__device_stub__Z8MyKernelvv
jmp .L14
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC5:
.string "_Z8MyKernelv"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _Z8MyKernelv(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "first.hip"
.globl _Z23__device_stub__MyKernelv # -- Begin function _Z23__device_stub__MyKernelv
.p2align 4, 0x90
.type _Z23__device_stub__MyKernelv,@function
_Z23__device_stub__MyKernelv: # @_Z23__device_stub__MyKernelv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z8MyKernelv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end0:
.size _Z23__device_stub__MyKernelv, .Lfunc_end0-_Z23__device_stub__MyKernelv
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $56, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movabsq $4294967298, %rbx # imm = 0x100000002
movl $.Lstr, %edi
callq puts@PLT
leaq -1(%rbx), %r14
movq %r14, %rdi
movl $1, %esi
movq %rbx, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z8MyKernelv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
movl $.Lstr.1, %edi
callq puts@PLT
callq hipDeviceSynchronize
movl $.Lstr.6, %edi
callq puts@PLT
movq %rbx, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z8MyKernelv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
movl $.Lstr.3, %edi
callq puts@PLT
callq hipDeviceSynchronize
movl $.Lstr.6, %edi
callq puts@PLT
movq %rbx, %rdi
movl $1, %esi
movq %rbx, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_6
# %bb.5:
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z8MyKernelv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_6:
movl $.Lstr.5, %edi
callq puts@PLT
callq hipDeviceSynchronize
movl $.Lstr.6, %edi
callq puts@PLT
xorl %eax, %eax
addq $56, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8MyKernelv, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z8MyKernelv,@object # @_Z8MyKernelv
.section .rodata,"a",@progbits
.globl _Z8MyKernelv
.p2align 3, 0x0
_Z8MyKernelv:
.quad _Z23__device_stub__MyKernelv
.size _Z8MyKernelv, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z8MyKernelv"
.size .L__unnamed_1, 13
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Kernel (Blocks x Threads)"
.size .Lstr, 26
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "\n\n****Kernel (1x2) launched****\n"
.size .Lstr.1, 33
.type .Lstr.3,@object # @str.3
.Lstr.3:
.asciz "\n\n****Kernel (2x1) launched****\n"
.size .Lstr.3, 33
.type .Lstr.5,@object # @str.5
.Lstr.5:
.asciz "\n\n****Kernel (2x2) launched****\n"
.size .Lstr.5, 33
.type .Lstr.6,@object # @str.6
.Lstr.6:
.asciz "\n****Kernel finished****\n"
.size .Lstr.6, 26
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z23__device_stub__MyKernelv
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z8MyKernelv
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include<stdio.h>
#include<math.h>
#include<stdlib.h>
//#include<cuda.h>
#include<unistd.h>
#include<time.h>
/*
for(i=0;i<N/c;i++)
{
for(j=0;j<cols[i];j++)
{
result[i*c+0]+=scval_flat[cs[i]+(j*c)]*vecX[sccol_flat[cs[i]+(j*2)]];
result[i*c+1]+=scval_flat[cs[i]+(j*c)+1]*vecX[sccol_flat[cs[i]+(j*2)+1]];
}
}
*/
__global__ void multiply(int *scval, int *sccol, int *vec, int *result, int *cols, int *cs)
{
int tid=blockIdx.x;
int sum1=0;
int j;
//int colidx=tid/2;
//printf("\n tid= %d", tid);
//printf("\n Writing to %d",tid*c+threadIdx.x);
for(j=0;j<cols[tid];j++)
{
sum1 += scval[cs[tid]+(j*blockDim.x)+threadIdx.x]*vec[sccol[cs[tid]+(j*blockDim.x)+threadIdx.x]];
// sum2 += scval[cs[tid]+(j*2)+1]*vec[sccol[cs[tid]+(j*2)+1]];
}
__syncthreads();
result[tid*blockDim.x+threadIdx.x]=sum1;
// result[tid*c+1]=sum2;
}
__global__ void printmatscreen(int* mat, int N)
{
int i;
for (i=0;i<N;i++)
{
printf("%d ",mat[i]);
}
printf("\n");
}
int** Make2DIntArray(int arraySizeX, int arraySizeY)
{
int** theArray;
theArray = (int**) malloc(arraySizeX*sizeof(int*));
int i;
for (i = 0; i < arraySizeX; i++)
theArray[i] = (int*) malloc(arraySizeY*sizeof(int));
int j;
for (i=0;i<arraySizeX;i++)
{
for (j=0;j<arraySizeY;j++)
{
theArray[i][j]=0;
}
}
return theArray;
}
int** Make2DVariableIntArray(int rows, int blocks, int blocksize, int* columns)
{
int** theArray;
theArray = (int**) malloc(rows*sizeof(int*));
int i, j, k;
for (i = 0; i < blocks; i++)
{
k=columns[i];
for (j=0; j < blocksize; j++)
{
theArray[i*blocksize+j] = (int*) malloc(k*sizeof(int));
}
}
//int j;
for (i=0;i<blocks;i++)
{
for (j=0;j<blocksize;j++)
{
for (k=0;k<columns[i];k++)
{
theArray[i*blocksize+j][k]=0;
}
}
}
return theArray;
}
int** Changeto2DVariableIntArray(int** theArray,int rows, int blocks, int blocksize, int* columns)
{
int** NewArray=Make2DVariableIntArray(rows,blocks,blocksize,columns);
int i, j, k;
for (i=0;i<blocks;i++)
{
for (j=0;j<blocksize;j++)
{
for (k=0;k<columns[i];k++)
{
NewArray[i*blocksize+j][k]=theArray[i*blocksize+j][k];
}
}
}
printf("changed to multiple matrixes");
return NewArray;
}
void init_zeros(int** matrix, int N)
{
int i,j;
for (i=0;i<N;i++)
{
for (j=0;j<N;j++)
{
matrix[i][j]=0;
}
}
}
void printmat(int** matrix, int N, int Nj)
{
int i,j;
for (i=0;i<N;i++)
{
printf("\n");
for (j=0;j<N;j++)
{
printf("%d ",matrix[i][j]);
}
}
printf("\n");
}
void printtofile(int** matrix, int K, char* filename)
{
/*
Prints original 2D matrices to file
*/
FILE *fp;
fp=fopen(filename,"wt");
int i,j;
for (i=0;i<K;i++)
{
fprintf(fp, "\n");
for (j=0;j<K;j++)
{
fprintf(fp, "%d\t", matrix[i][j]);
}
}
}
void printtofile1D(int* matrix, int K, char* filename)
{
/*
Prints resultant matrix to a file
*/
FILE *fp;
fp=fopen(filename,"wt");
int i;
for (i=0;i<K;i++)
{
fprintf(fp, "%d\n", matrix[i]);
}
}
int* Make1DIntArray(int arraySizeX)
{
int* theArray;
theArray = (int*)malloc(arraySizeX*sizeof(int));
int i;
for (i=0;i<arraySizeX;i++)
{
theArray[i]=0;
}
return theArray;
}
void freese(int sizeX, int sizeY, double** ptr)
{
int i;
for (i=0;i<sizeX;i++)
free(ptr[i]);
free(ptr);
}
int main()
{
int N=5000;
// const int Dsize=1000;
FILE *arr, *vec;
int i,j,maxrowwidth=0,tint=0;
int** a=Make2DIntArray(N,N);
// int* val=Make1DIntArray(Dsize);
// int* col=Make1DIntArray(Dsize);
// int* row=Make1DIntArray(Dsize);
int* result=Make1DIntArray(N);
int* vecX=Make1DIntArray(N);
int** scval=Make2DIntArray(N,N); //sell c value
int** sccol=Make2DIntArray(N,N); //sell c col
int* rowwidth=Make1DIntArray(N); //number of elements in each row
int* temp=Make1DIntArray(N);
int* rows=Make1DIntArray(N);
int* resultsordered=Make1DIntArray(N);
int sig=4,c=2;
// int* rowwidth=Make1DIntArray(N);
int *dev_vec, *dev_scval, *dev_result, *dev_sccol, *dev_cols, *dev_cs;
//int val[10],col[10],row[10];
arr=fopen("matrix5000.txt","r");
int k=0;
// struct timeval start, end;
// gettimeofday(&start, NULL);
//Reading the vector
vec=fopen("vector5000.txt","r");
for (i=0;i<N;i++)
{
fscanf(vec,"%d",&vecX[i]);
rows[i]=i;
}
//Reading the matrix
for(i=0;i<N;i++)
{
//printf("\n");
for(j=0;j<N;j++)
{
fscanf(arr,"%d",&a[i][j]);
// printf("%d ",a[i][j]);
}
}
printf("\n");
//row[i]=k;
//printf("\n k = %d\n ", k);
//sleep(10);
// gettimeofday(&end, NULL);
// double delta = ((end.tv_sec - start.tv_sec) * 1000000u +
// end.tv_usec - start.tv_usec) / 1.e6;
// printf("\nTime spent=%f\n", delta);
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
{
if(a[i][j]!=0)
{
scval[i][k]=a[i][j];
sccol[i][k]=j;
rowwidth[i]=k+1;
if(rowwidth[i]>maxrowwidth)
{
maxrowwidth=rowwidth[i];
}k++;
}
}
//printf("\nRow width %d = %d", i, rowwidth[i]);
k=0;
}
if(sig>1&&c!=sig)
{
for(i=0;i<N;i=i+sig)
{
for(k=0;k<sig-1;k++)
{
for(j=i;(j<i+sig-1) && (j<N);j++)
{
if(rowwidth[j]<rowwidth[j+1])
{
temp=scval[j];
scval[j]=scval[j+1];
scval[j+1]=temp;
temp=sccol[j];
sccol[j]=sccol[j+1];
sccol[j+1]=temp;
tint=rowwidth[j];
rowwidth[j]=rowwidth[j+1];
rowwidth[j+1]=tint;
tint=rows[j];
rows[j]=rows[j+1];
rows[j+1]=tint;
}
}
}
}
}
/* for(i=0;i<N;i++)
{
if(scval[i][0]==0)
{
break;
}
}
N=i;
*/
printf("\nmaxrowwidth=%d\n",maxrowwidth);
// printmat(scval,N,N);
// printtofile(scval,N,"scval.txt");
// printtofile(sccol,N,"sccol.txt");
/* printf("\n Shuffled rows is:\n");
for (i=0;i<N;i++)
{
printf("%d\n",rows[i]);
}
*/
//printmatscreen<<<1,1>>>(dev_b,N);
/* multiply<<<N,N>>>(dev_a, dev_b, dev_c, N, N);
cudaMemcpy(result, dev_c, sizeof(int)*N, cudaMemcpyDeviceToHost);
for (i=0;i<N;i++)
{
printf("\n%d",result[i]);
}
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
// NEED TO FIGURE OUT A WAY TO POPULATE cols SO AS TO HAVE varmat CREATED PROPERLY. SYSTEM CRASHES OTHERWISE
*/
int* cols=Make1DIntArray(N/c);
j=0;
int colsum=0;
for(i=0;i<N;i=i+c)
{
cols[j]=rowwidth[i];
colsum+=cols[j];
j++;
}
int** varscval=Changeto2DVariableIntArray(scval,N,N/c,c,cols);
int** varsccol=Changeto2DVariableIntArray(sccol,N,N/c,c,cols);
/* for (i=0;i<N/c;i++)
{
for(j=0;j<c;j++)
{
printf("\n");
for (k=0;k<cols[i];k++)
{
printf("%d ",varscval[i*c+j][k]);
//printf("%d ",varsccol[i*c+j][k]);
}
}
}
*/
int varsize=colsum*c;
//flattening scval and sccol
int counters=0;
int* scval_flat=Make1DIntArray(varsize);
int* sccol_flat=Make1DIntArray(varsize);
int* cs=Make1DIntArray((N/c)+1);
cs[0]=0;
int countcols=0;
int z=0;
printf("\n");
printf("\n");
printf("\n");
for (i=0;i<N/c;i++)
{
countcols=0;
for(j=0;j<cols[i];j++)
{
for (k=0;k<c;k++)
{
scval_flat[counters]=varscval[i*c+k][j];
sccol_flat[counters]=varsccol[i*c+k][j];
//printf("%d ",scval_flat[counters]);
//printf("%d\n", sccol_flat[counters]);
counters=counters+1;
countcols=countcols+1;
}
}
cs[z+1]=cs[z]+countcols;
z=z+1;
}
/* printf("\ncs:");
for(i=1;i<(N/c)+1;i++)
printf("%d ", cs[i]);
/*
for(i=0;i<N/c;i++)
{
for(j=0;j<cols[i];j++)
{
result[i*c+0]+=scval_flat[cs[i]+(j*c)]*vecX[sccol_flat[cs[i]+(j*2)]];
result[i*c+1]+=scval_flat[cs[i]+(j*c)+1]*vecX[sccol_flat[cs[i]+(j*2)+1]];
}
}
printtofile1D(result,N,"resultstest.txt");
*/
cudaEvent_t start, stop, start_kernel, stop_kernel;
float time_kernel;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventCreate(&start_kernel);
cudaEventCreate(&stop_kernel);
cudaMalloc((void**)&dev_vec, sizeof(int)*N);
cudaMalloc((void**)&dev_scval, sizeof(int)*varsize);
cudaMalloc((void**)&dev_result, sizeof(int)*N);
cudaMalloc((void**)&dev_sccol, sizeof(int)*varsize);
cudaMalloc((void**)&dev_cols, sizeof(int)*(N/c));
cudaMalloc((void**)&dev_cs, sizeof(int)*(N/c));
//cudaEventRecord(start,0);
cudaMemcpy(dev_vec, vecX, sizeof(int)*N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_scval, scval_flat, sizeof(int)*varsize, cudaMemcpyHostToDevice);
cudaMemcpy(dev_result, result, sizeof(int)*N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_sccol, sccol_flat, sizeof(int)*varsize, cudaMemcpyHostToDevice);
cudaMemcpy(dev_cols, cols, sizeof(int)*(N/c), cudaMemcpyHostToDevice);
cudaMemcpy(dev_cs, cs, sizeof(int)*(N/c), cudaMemcpyHostToDevice);
cudaEventRecord(start_kernel,0);
multiply<<<N/c,c>>>(dev_scval, dev_sccol, dev_vec, dev_result, dev_cols, dev_cs);
//sleep(10);
cudaEventRecord(stop_kernel,0);
cudaMemcpy(result, dev_result, sizeof(int)*N, cudaMemcpyDeviceToHost);
//cudaEventRecord(stop,0);
cudaEventSynchronize(stop_kernel);
//cudaEventElapsedTime(&time, start, stop);
cudaEventElapsedTime(&time_kernel, start_kernel, stop_kernel);
//printf("\nTime for kernel with data transfer = %f ms \n", time);
printf("\nTime for kernel without data transfer = %f ms \n", time_kernel);
for (i=0;i<N;i++)
{
resultsordered[rows[i]]=result[i];
}
printtofile1D(resultsordered,N,"results.txt");
// CODE TO RESHUFFLE BACK
cudaFree(dev_vec);
cudaFree(dev_scval);
cudaFree(dev_result);
cudaFree(dev_sccol);
cudaFree(dev_cols);
return 0;
/*
cudaMalloc((void**)&dev_vec, sizeof(int)*N);
cudaMalloc((void**)&dev_scval_flat, sizeof(int)*varsize);
cudaMalloc((void**)&dev_result, sizeof(int)*N);
cudaMalloc((void**)&dev_sccol_flat, sizeof(int)*varsize);
cudaMemcpy(dev_a, vecX, sizeof(int)*N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, varscval, sizeof(int)*varsize, cudaMemcpyHostToDevice);
cudaMemcpy(dev_c, result, sizeof(int)*N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_d, varsccol, sizeof(int)*varsize, cudaMemcpyHostToDevice);
*/
} | code for sm_80
Function : _Z14printmatscreenPii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fc800078e00ff */
/*0010*/ IMAD.MOV.U32 R25, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff197624 */
/* 0x000fe200078e00ff */
/*0020*/ IADD3 R1, R1, -0x8, RZ ; /* 0xfffffff801017810 */
/* 0x000fc80007ffe0ff */
/*0030*/ ISETP.GE.AND P0, PT, R25, 0x1, PT ; /* 0x000000011900780c */
/* 0x000fda0003f06270 */
/*0040*/ @!P0 BRA 0x750 ; /* 0x0000070000008947 */
/* 0x000fea0003800000 */
/*0050*/ IADD3 R0, R25, -0x1, RZ ; /* 0xffffffff19007810 */
/* 0x000fe20007ffe0ff */
/*0060*/ ULDC.64 UR36, c[0x0][0x118] ; /* 0x0000460000247ab9 */
/* 0x000fe20000000a00 */
/*0070*/ IADD3 R23, P1, R1, c[0x0][0x20], RZ ; /* 0x0000080001177a10 */
/* 0x000fe20007f3e0ff */
/*0080*/ IMAD.MOV.U32 R24, RZ, RZ, RZ ; /* 0x000000ffff187224 */
/* 0x000fe200078e00ff */
/*0090*/ ISETP.GE.U32.AND P0, PT, R0, 0x3, PT ; /* 0x000000030000780c */
/* 0x000fe40003f06070 */
/*00a0*/ LOP3.LUT R25, R25, 0x3, RZ, 0xc0, !PT ; /* 0x0000000319197812 */
/* 0x000fe200078ec0ff */
/*00b0*/ IMAD.X R22, RZ, RZ, c[0x0][0x24], P1 ; /* 0x00000900ff167624 */
/* 0x000fd400008e06ff */
/*00c0*/ @!P0 BRA 0x580 ; /* 0x000004b000008947 */
/* 0x000fea0003800000 */
/*00d0*/ BSSY B6, 0x580 ; /* 0x000004a000067945 */
/* 0x000fe20003800000 */
/*00e0*/ IADD3 R19, -R25, c[0x0][0x168], RZ ; /* 0x00005a0019137a10 */
/* 0x000fe20007ffe1ff */
/*00f0*/ IMAD.MOV.U32 R24, RZ, RZ, RZ ; /* 0x000000ffff187224 */
/* 0x000fe400078e00ff */
/*0100*/ IMAD.MOV.U32 R16, RZ, RZ, c[0x0][0x160] ; /* 0x00005800ff107624 */
/* 0x000fe400078e00ff */
/*0110*/ IMAD.MOV.U32 R17, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff117624 */
/* 0x000fca00078e00ff */
/*0120*/ LDG.E R0, [R16.64] ; /* 0x0000002410007981 */
/* 0x000ea2000c1e1900 */
/*0130*/ MOV R18, 0x0 ; /* 0x0000000000127802 */
/* 0x000fe20000000f00 */
/*0140*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff047624 */
/* 0x000fe200078e00ff */
/*0150*/ IADD3 R19, R19, -0x4, RZ ; /* 0xfffffffc13137810 */
/* 0x000fe20007ffe0ff */
/*0160*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0xc] ; /* 0x01000300ff057624 */
/* 0x000fe200078e00ff */
/*0170*/ LDC.64 R2, c[0x4][R18] ; /* 0x0100000012027b82 */
/* 0x0000620000000a00 */
/*0180*/ IMAD.MOV.U32 R6, RZ, RZ, R23 ; /* 0x000000ffff067224 */
/* 0x000fe200078e0017 */
/*0190*/ ISETP.NE.AND P0, PT, R19, RZ, PT ; /* 0x000000ff1300720c */
/* 0x000fe20003f05270 */
/*01a0*/ IMAD.MOV.U32 R7, RZ, RZ, R22 ; /* 0x000000ffff077224 */
/* 0x000fc600078e0016 */
/*01b0*/ P2R R26, PR, RZ, 0x1 ; /* 0x00000001ff1a7803 */
/* 0x000fe20000000000 */
/*01c0*/ STL [R1], R0 ; /* 0x0000000001007387 */
/* 0x0041e80000100800 */
/*01d0*/ LEPC R8 ; /* 0x000000000008734e */
/* 0x002fe20000000000 */
/*01e0*/ MOV R11, 0x250 ; /* 0x00000250000b7802 */
/* 0x000fe40000000f00 */
/*01f0*/ MOV R20, 0x1d0 ; /* 0x000001d000147802 */
/* 0x000fe40000000f00 */
/*0200*/ MOV R21, 0x0 ; /* 0x0000000000157802 */
/* 0x000fe40000000f00 */
/*0210*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x001fe40000000f00 */
/*0220*/ IADD3 R20, P0, P1, -R20, R11, R8 ; /* 0x0000000b14147210 */
/* 0x000fc8000791e108 */
/*0230*/ IADD3.X R21, ~R0, R21, R9, P0, P1 ; /* 0x0000001500157210 */
/* 0x000fc800007e2509 */
/*0240*/ CALL.ABS.NOINC R2 ; /* 0x0000000002007343 */
/* 0x000fea0003c00000 */
/*0250*/ LDG.E R0, [R16.64+0x4] ; /* 0x0000042410007981 */
/* 0x000ea2000c1e1900 */
/*0260*/ LDC.64 R2, c[0x4][R18] ; /* 0x0100000012027b82 */
/* 0x0000620000000a00 */
/*0270*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff047624 */
/* 0x000fe400078e00ff */
/*0280*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0xc] ; /* 0x01000300ff057624 */
/* 0x000fe400078e00ff */
/*0290*/ IMAD.MOV.U32 R6, RZ, RZ, R23 ; /* 0x000000ffff067224 */
/* 0x000fe400078e0017 */
/*02a0*/ IMAD.MOV.U32 R7, RZ, RZ, R22 ; /* 0x000000ffff077224 */
/* 0x000fe200078e0016 */
/*02b0*/ STL [R1], R0 ; /* 0x0000000001007387 */
/* 0x0041e80000100800 */
/*02c0*/ LEPC R8 ; /* 0x000000000008734e */
/* 0x002fe20000000000 */
/*02d0*/ MOV R11, 0x340 ; /* 0x00000340000b7802 */
/* 0x000fc40000000f00 */
/*02e0*/ MOV R20, 0x2c0 ; /* 0x000002c000147802 */
/* 0x000fe40000000f00 */
/*02f0*/ MOV R21, 0x0 ; /* 0x0000000000157802 */
/* 0x000fe40000000f00 */
/*0300*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x001fe40000000f00 */
/*0310*/ IADD3 R20, P0, P1, -R20, R11, R8 ; /* 0x0000000b14147210 */
/* 0x000fc8000791e108 */
/*0320*/ IADD3.X R21, ~R0, R21, R9, P0, P1 ; /* 0x0000001500157210 */
/* 0x000fc800007e2509 */
/*0330*/ CALL.ABS.NOINC R2 ; /* 0x0000000002007343 */
/* 0x000fea0003c00000 */
/*0340*/ LDG.E R0, [R16.64+0x8] ; /* 0x0000082410007981 */
/* 0x000ea2000c1e1900 */
/*0350*/ LDC.64 R2, c[0x4][R18] ; /* 0x0100000012027b82 */
/* 0x0000620000000a00 */
/*0360*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff047624 */
/* 0x000fe400078e00ff */
/*0370*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0xc] ; /* 0x01000300ff057624 */
/* 0x000fe400078e00ff */
/*0380*/ IMAD.MOV.U32 R6, RZ, RZ, R23 ; /* 0x000000ffff067224 */
/* 0x000fe400078e0017 */
/*0390*/ IMAD.MOV.U32 R7, RZ, RZ, R22 ; /* 0x000000ffff077224 */
/* 0x000fe200078e0016 */
/*03a0*/ STL [R1], R0 ; /* 0x0000000001007387 */
/* 0x0041e80000100800 */
/*03b0*/ LEPC R8 ; /* 0x000000000008734e */
/* 0x002fe20000000000 */
/*03c0*/ MOV R11, 0x430 ; /* 0x00000430000b7802 */
/* 0x000fc40000000f00 */
/*03d0*/ MOV R20, 0x3b0 ; /* 0x000003b000147802 */
/* 0x000fe40000000f00 */
/*03e0*/ MOV R21, 0x0 ; /* 0x0000000000157802 */
/* 0x000fe40000000f00 */
/*03f0*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x001fe40000000f00 */
/*0400*/ IADD3 R20, P0, P1, -R20, R11, R8 ; /* 0x0000000b14147210 */
/* 0x000fc8000791e108 */
/*0410*/ IADD3.X R21, ~R0, R21, R9, P0, P1 ; /* 0x0000001500157210 */
/* 0x000fc800007e2509 */
/*0420*/ CALL.ABS.NOINC R2 ; /* 0x0000000002007343 */
/* 0x000fea0003c00000 */
/*0430*/ LDG.E R0, [R16.64+0xc] ; /* 0x00000c2410007981 */
/* 0x000ea2000c1e1900 */
/*0440*/ LDC.64 R2, c[0x4][R18] ; /* 0x0100000012027b82 */
/* 0x0000620000000a00 */
/*0450*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff047624 */
/* 0x000fe400078e00ff */
/*0460*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0xc] ; /* 0x01000300ff057624 */
/* 0x000fe400078e00ff */
/*0470*/ IMAD.MOV.U32 R6, RZ, RZ, R23 ; /* 0x000000ffff067224 */
/* 0x000fe400078e0017 */
/*0480*/ IMAD.MOV.U32 R7, RZ, RZ, R22 ; /* 0x000000ffff077224 */
/* 0x000fe200078e0016 */
/*0490*/ STL [R1], R0 ; /* 0x0000000001007387 */
/* 0x0041e80000100800 */
/*04a0*/ LEPC R8 ; /* 0x000000000008734e */
/* 0x002fe20000000000 */
/*04b0*/ MOV R11, 0x520 ; /* 0x00000520000b7802 */
/* 0x000fc40000000f00 */
/*04c0*/ MOV R20, 0x4a0 ; /* 0x000004a000147802 */
/* 0x000fe40000000f00 */
/*04d0*/ MOV R21, 0x0 ; /* 0x0000000000157802 */
/* 0x000fe40000000f00 */
/*04e0*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x001fe40000000f00 */
/*04f0*/ IADD3 R20, P0, P1, -R20, R11, R8 ; /* 0x0000000b14147210 */
/* 0x000fc8000791e108 */
/*0500*/ IADD3.X R21, ~R0, R21, R9, P0, P1 ; /* 0x0000001500157210 */
/* 0x000fc800007e2509 */
/*0510*/ CALL.ABS.NOINC R2 ; /* 0x0000000002007343 */
/* 0x000fea0003c00000 */
/*0520*/ ISETP.NE.AND P6, PT, R26, RZ, PT ; /* 0x000000ff1a00720c */
/* 0x000fe40003fc5270 */
/*0530*/ IADD3 R16, P0, R16, 0x10, RZ ; /* 0x0000001010107810 */
/* 0x000fe40007f1e0ff */
/*0540*/ IADD3 R24, R24, 0x4, RZ ; /* 0x0000000418187810 */
/* 0x000fc60007ffe0ff */
/*0550*/ IMAD.X R17, RZ, RZ, R17, P0 ; /* 0x000000ffff117224 */
/* 0x000fcc00000e0611 */
/*0560*/ @P6 BRA 0x120 ; /* 0xfffffbb000006947 */
/* 0x000fea000383ffff */
/*0570*/ BSYNC B6 ; /* 0x0000000000067941 */
/* 0x000fea0003800000 */
/*0580*/ ISETP.NE.AND P0, PT, R25, RZ, PT ; /* 0x000000ff1900720c */
/* 0x000fe20003f05270 */
/*0590*/ BSSY B6, 0x750 ; /* 0x000001b000067945 */
/* 0x000fd80003800000 */
/*05a0*/ @!P0 BRA 0x740 ; /* 0x0000019000008947 */
/* 0x000fea0003800000 */
/*05b0*/ IMAD.MOV.U32 R17, RZ, RZ, 0x4 ; /* 0x00000004ff117424 */
/* 0x000fc800078e00ff */
/*05c0*/ IMAD.WIDE R16, R24, R17, c[0x0][0x160] ; /* 0x0000580018107625 */
/* 0x000fc800078e0211 */
/*05d0*/ IMAD.MOV.U32 R8, RZ, RZ, R16 ; /* 0x000000ffff087224 */
/* 0x000fe400078e0010 */
/*05e0*/ IMAD.MOV.U32 R9, RZ, RZ, R17 ; /* 0x000000ffff097224 */
/* 0x000fca00078e0011 */
/*05f0*/ LDG.E R0, [R8.64] ; /* 0x0000002408007981 */
/* 0x000ea2000c1e1900 */
/*0600*/ MOV R2, 0x0 ; /* 0x0000000000027802 */
/* 0x000fe20000000f00 */
/*0610*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff047624 */
/* 0x000fe400078e00ff */
/*0620*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0xc] ; /* 0x01000300ff057624 */
/* 0x000fe400078e00ff */
/*0630*/ IMAD.MOV.U32 R6, RZ, RZ, R23 ; /* 0x000000ffff067224 */
/* 0x000fe200078e0017 */
/*0640*/ LDC.64 R2, c[0x4][R2] ; /* 0x0100000002027b82 */
/* 0x000e220000000a00 */
/*0650*/ IMAD.MOV.U32 R7, RZ, RZ, R22 ; /* 0x000000ffff077224 */
/* 0x000fe200078e0016 */
/*0660*/ STL [R1], R0 ; /* 0x0000000001007387 */
/* 0x0043ec0000100800 */
/*0670*/ LEPC R8 ; /* 0x000000000008734e */
/* 0x001fe20000000000 */
/*0680*/ MOV R11, 0x6f0 ; /* 0x000006f0000b7802 */
/* 0x000fc40000000f00 */
/*0690*/ MOV R20, 0x670 ; /* 0x0000067000147802 */
/* 0x000fe40000000f00 */
/*06a0*/ MOV R21, 0x0 ; /* 0x0000000000157802 */
/* 0x000fe40000000f00 */
/*06b0*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x002fe40000000f00 */
/*06c0*/ IADD3 R20, P0, P1, -R20, R11, R8 ; /* 0x0000000b14147210 */
/* 0x000fc8000791e108 */
/*06d0*/ IADD3.X R21, ~R0, R21, R9, P0, P1 ; /* 0x0000001500157210 */
/* 0x000fc800007e2509 */
/*06e0*/ CALL.ABS.NOINC R2 ; /* 0x0000000002007343 */
/* 0x000fea0003c00000 */
/*06f0*/ IADD3 R25, R25, -0x1, RZ ; /* 0xffffffff19197810 */
/* 0x000fe40007ffe0ff */
/*0700*/ IADD3 R16, P1, R16, 0x4, RZ ; /* 0x0000000410107810 */
/* 0x000fe40007f3e0ff */
/*0710*/ ISETP.NE.AND P0, PT, R25, RZ, PT ; /* 0x000000ff1900720c */
/* 0x000fc60003f05270 */
/*0720*/ IMAD.X R17, RZ, RZ, R17, P1 ; /* 0x000000ffff117224 */
/* 0x000fd400008e0611 */
/*0730*/ @P0 BRA 0x5d0 ; /* 0xfffffe9000000947 */
/* 0x000fea000383ffff */
/*0740*/ BSYNC B6 ; /* 0x0000000000067941 */
/* 0x000fea0003800000 */
/*0750*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x000fe20000000f00 */
/*0760*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x10] ; /* 0x01000400ff047624 */
/* 0x000fe200078e00ff */
/*0770*/ CS2R R6, SRZ ; /* 0x0000000000067805 */
/* 0x000fe2000001ff00 */
/*0780*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0x14] ; /* 0x01000500ff057624 */
/* 0x000fe200078e00ff */
/*0790*/ LDC.64 R2, c[0x4][R0] ; /* 0x0100000000027b82 */
/* 0x00006c0000000a00 */
/*07a0*/ LEPC R8 ; /* 0x000000000008734e */
/* 0x000fe20000000000 */
/*07b0*/ MOV R11, 0x820 ; /* 0x00000820000b7802 */
/* 0x000fe40000000f00 */
/*07c0*/ MOV R20, 0x7a0 ; /* 0x000007a000147802 */
/* 0x000fe40000000f00 */
/*07d0*/ MOV R21, 0x0 ; /* 0x0000000000157802 */
/* 0x000fe40000000f00 */
/*07e0*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x001fc40000000f00 */
/*07f0*/ IADD3 R20, P0, P1, -R20, R11, R8 ; /* 0x0000000b14147210 */
/* 0x000fc8000791e108 */
/*0800*/ IADD3.X R21, ~R0, R21, R9, P0, P1 ; /* 0x0000001500157210 */
/* 0x000fc800007e2509 */
/*0810*/ CALL.ABS.NOINC R2 ; /* 0x0000000002007343 */
/* 0x002fea0003c00000 */
/*0820*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0830*/ BRA 0x830; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0840*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0850*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0860*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0870*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0880*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0890*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z8multiplyPiS_S_S_S_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE R4, R0, R3, c[0x0][0x180] ; /* 0x0000600000047625 */
/* 0x001fca00078e0203 */
/*0050*/ LDG.E R2, [R4.64] ; /* 0x0000000404027981 */
/* 0x000ea4000c1e1900 */
/*0060*/ ISETP.GT.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x004fda0003f04270 */
/*0070*/ @!P0 MOV R24, RZ ; /* 0x000000ff00188202 */
/* 0x000fe20000000f00 */
/*0080*/ @!P0 BRA 0x4e0 ; /* 0x0000045000008947 */
/* 0x000fea0003800000 */
/*0090*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000e220000002100 */
/*00a0*/ IADD3 R6, R2, -0x1, RZ ; /* 0xffffffff02067810 */
/* 0x000fe40007ffe0ff */
/*00b0*/ SHF.R.S32.HI R5, RZ, 0x1f, R0 ; /* 0x0000001fff057819 */
/* 0x000fe40000011400 */
/*00c0*/ ISETP.GE.U32.AND P1, PT, R6, 0x3, PT ; /* 0x000000030600780c */
/* 0x000fe40003f26070 */
/*00d0*/ LEA R8, P0, R0, c[0x0][0x188], 0x2 ; /* 0x0000620000087a11 */
/* 0x000fc800078010ff */
/*00e0*/ LEA.HI.X R9, R0, c[0x0][0x18c], R5, 0x2, P0 ; /* 0x0000630000097a11 */
/* 0x000fe400000f1405 */
/*00f0*/ LOP3.LUT R5, R2, 0x3, RZ, 0xc0, !PT ; /* 0x0000000302057812 */
/* 0x000fc600078ec0ff */
/*0100*/ LDG.E R4, [R8.64] ; /* 0x0000000408047981 */
/* 0x000362000c1e1900 */
/*0110*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fe40003f05270 */
/*0120*/ MOV R6, RZ ; /* 0x000000ff00067202 */
/* 0x000fe40000000f00 */
/*0130*/ MOV R24, RZ ; /* 0x000000ff00187202 */
/* 0x000fe20000000f00 */
/*0140*/ @!P1 BRA 0x400 ; /* 0x000002b000009947 */
/* 0x000ff00003800000 */
/*0150*/ IADD3 R8, R4.reuse, R7, RZ ; /* 0x0000000704087210 */
/* 0x063fe20007ffe0ff */
/*0160*/ HFMA2.MMA R6, -RZ, RZ, 0, 0 ; /* 0x00000000ff067435 */
/* 0x000fe200000001ff */
/*0170*/ MOV R9, c[0x0][0x0] ; /* 0x0000000000097a02 */
/* 0x000fe20000000f00 */
/*0180*/ IMAD.IADD R11, R5, 0x1, -R2 ; /* 0x00000001050b7824 */
/* 0x000fe200078e0a02 */
/*0190*/ IADD3 R26, R4, c[0x0][0x0], R7 ; /* 0x00000000041a7a10 */
/* 0x000fc40007ffe007 */
/*01a0*/ LEA R2, R9.reuse, R8, 0x1 ; /* 0x0000000809027211 */
/* 0x040fe200078e08ff */
/*01b0*/ IMAD R10, R9, 0x3, R8 ; /* 0x00000003090a7824 */
/* 0x000fe400078e0208 */
/*01c0*/ IMAD.WIDE.U32 R14, R8, R3, c[0x0][0x168] ; /* 0x00005a00080e7625 */
/* 0x000fc800078e0003 */
/*01d0*/ IMAD.WIDE.U32 R16, R26, R3.reuse, c[0x0][0x168] ; /* 0x00005a001a107625 */
/* 0x080fe400078e0003 */
/*01e0*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000ea4000c1e1900 */
/*01f0*/ IMAD.WIDE.U32 R20, R2, R3.reuse, c[0x0][0x168] ; /* 0x00005a0002147625 */
/* 0x080fe400078e0003 */
/*0200*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x000ee4000c1e1900 */
/*0210*/ IMAD.WIDE.U32 R22, R10, R3.reuse, c[0x0][0x168] ; /* 0x00005a000a167625 */
/* 0x080fe400078e0003 */
/*0220*/ LDG.E R20, [R20.64] ; /* 0x0000000414147981 */
/* 0x000f28000c1e1900 */
/*0230*/ LDG.E R22, [R22.64] ; /* 0x0000000416167981 */
/* 0x000f62000c1e1900 */
/*0240*/ IMAD.WIDE.U32 R12, R8, R3, c[0x0][0x160] ; /* 0x00005800080c7625 */
/* 0x000fc800078e0003 */
/*0250*/ IMAD.WIDE.U32 R18, R26, R3.reuse, c[0x0][0x160] ; /* 0x000058001a127625 */
/* 0x080fe200078e0003 */
/*0260*/ LDG.E R25, [R12.64] ; /* 0x000000040c197981 */
/* 0x00076a000c1e1900 */
/*0270*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */
/* 0x000f62000c1e1900 */
/*0280*/ IMAD.WIDE R28, R14, R3, c[0x0][0x170] ; /* 0x00005c000e1c7625 */
/* 0x004fc800078e0203 */
/*0290*/ IMAD.WIDE R12, R16, R3.reuse, c[0x0][0x170] ; /* 0x00005c00100c7625 */
/* 0x088fe400078e0203 */
/*02a0*/ LDG.E R28, [R28.64] ; /* 0x000000041c1c7981 */
/* 0x000ea8000c1e1900 */
/*02b0*/ LDG.E R27, [R12.64] ; /* 0x000000040c1b7981 */
/* 0x000ee2000c1e1900 */
/*02c0*/ IMAD.WIDE.U32 R14, R2, R3, c[0x0][0x160] ; /* 0x00005800020e7625 */
/* 0x000fc800078e0003 */
/*02d0*/ IMAD.WIDE R16, R20, R3.reuse, c[0x0][0x170] ; /* 0x00005c0014107625 */
/* 0x090fe400078e0203 */
/*02e0*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000f24000c1e1900 */
/*02f0*/ IMAD.WIDE.U32 R20, R10, R3.reuse, c[0x0][0x160] ; /* 0x000058000a147625 */
/* 0x080fe400078e0003 */
/*0300*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x000f24000c1e1900 */
/*0310*/ IMAD.WIDE R22, R22, R3, c[0x0][0x170] ; /* 0x00005c0016167625 */
/* 0x020fe400078e0203 */
/*0320*/ LDG.E R20, [R20.64] ; /* 0x0000000414147981 */
/* 0x000f68000c1e1900 */
/*0330*/ LDG.E R22, [R22.64] ; /* 0x0000000416167981 */
/* 0x000f62000c1e1900 */
/*0340*/ IADD3 R6, R6, 0x4, RZ ; /* 0x0000000406067810 */
/* 0x000fe20007ffe0ff */
/*0350*/ IMAD R26, R9.reuse, 0x4, R26 ; /* 0x00000004091a7824 */
/* 0x040fe200078e021a */
/*0360*/ LEA R2, R9, R2, 0x2 ; /* 0x0000000209027211 */
/* 0x000fc400078e10ff */
/*0370*/ LEA R10, R9.reuse, R10, 0x2 ; /* 0x0000000a090a7211 */
/* 0x040fe400078e10ff */
/*0380*/ LEA R8, R9, R8, 0x2 ; /* 0x0000000809087211 */
/* 0x000fe200078e10ff */
/*0390*/ IMAD R25, R28, R25, R24 ; /* 0x000000191c197224 */
/* 0x004fc800078e0218 */
/*03a0*/ IMAD R25, R27, R18, R25 ; /* 0x000000121b197224 */
/* 0x008fe200078e0219 */
/*03b0*/ IADD3 R18, R11, R6, RZ ; /* 0x000000060b127210 */
/* 0x000fc80007ffe0ff */
/*03c0*/ ISETP.NE.AND P1, PT, R18, RZ, PT ; /* 0x000000ff1200720c */
/* 0x000fe20003f25270 */
/*03d0*/ IMAD R25, R16, R14, R25 ; /* 0x0000000e10197224 */
/* 0x010fc800078e0219 */
/*03e0*/ IMAD R24, R22, R20, R25 ; /* 0x0000001416187224 */
/* 0x020fd000078e0219 */
/*03f0*/ @P1 BRA 0x1c0 ; /* 0xfffffdc000001947 */
/* 0x000fea000383ffff */
/*0400*/ @!P0 BRA 0x4e0 ; /* 0x000000d000008947 */
/* 0x000fea0003800000 */
/*0410*/ IADD3 R7, R4, R7, RZ ; /* 0x0000000704077210 */
/* 0x021fca0007ffe0ff */
/*0420*/ IMAD R2, R6, c[0x0][0x0], R7 ; /* 0x0000000006027a24 */
/* 0x000fc800078e0207 */
/*0430*/ IMAD.WIDE.U32 R8, R2, R3, c[0x0][0x168] ; /* 0x00005a0002087625 */
/* 0x002fcc00078e0003 */
/*0440*/ LDG.E R8, [R8.64] ; /* 0x0000000408087981 */
/* 0x000ea2000c1e1900 */
/*0450*/ IMAD.WIDE.U32 R6, R2, R3, c[0x0][0x160] ; /* 0x0000580002067625 */
/* 0x000fcc00078e0003 */
/*0460*/ LDG.E R6, [R6.64] ; /* 0x0000000406067981 */
/* 0x000ee2000c1e1900 */
/*0470*/ IADD3 R5, R5, -0x1, RZ ; /* 0xffffffff05057810 */
/* 0x000fe20007ffe0ff */
/*0480*/ IMAD.WIDE R10, R8, R3, c[0x0][0x170] ; /* 0x00005c00080a7625 */
/* 0x004fcc00078e0203 */
/*0490*/ LDG.E R11, [R10.64] ; /* 0x000000040a0b7981 */
/* 0x000ee2000c1e1900 */
/*04a0*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fe40003f05270 */
/*04b0*/ IADD3 R2, R2, c[0x0][0x0], RZ ; /* 0x0000000002027a10 */
/* 0x000fe20007ffe0ff */
/*04c0*/ IMAD R24, R11, R6, R24 ; /* 0x000000060b187224 */
/* 0x008fd400078e0218 */
/*04d0*/ @P0 BRA 0x430 ; /* 0xffffff5000000947 */
/* 0x000fea000383ffff */
/*04e0*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000ea80000002100 */
/*04f0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0500*/ IMAD R2, R0, c[0x0][0x0], R5 ; /* 0x0000000000027a24 */
/* 0x004fc800078e0205 */
/*0510*/ IMAD.WIDE.U32 R2, R2, R3, c[0x0][0x178] ; /* 0x00005e0002027625 */
/* 0x000fca00078e0003 */
/*0520*/ STG.E [R2.64], R24 ; /* 0x0000001802007986 */
/* 0x000fe2000c101904 */
/*0530*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0540*/ BRA 0x540; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0550*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0560*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0570*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0580*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0590*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include<stdio.h>
#include<math.h>
#include<stdlib.h>
//#include<cuda.h>
#include<unistd.h>
#include<time.h>
/*
for(i=0;i<N/c;i++)
{
for(j=0;j<cols[i];j++)
{
result[i*c+0]+=scval_flat[cs[i]+(j*c)]*vecX[sccol_flat[cs[i]+(j*2)]];
result[i*c+1]+=scval_flat[cs[i]+(j*c)+1]*vecX[sccol_flat[cs[i]+(j*2)+1]];
}
}
*/
__global__ void multiply(int *scval, int *sccol, int *vec, int *result, int *cols, int *cs)
{
int tid=blockIdx.x;
int sum1=0;
int j;
//int colidx=tid/2;
//printf("\n tid= %d", tid);
//printf("\n Writing to %d",tid*c+threadIdx.x);
for(j=0;j<cols[tid];j++)
{
sum1 += scval[cs[tid]+(j*blockDim.x)+threadIdx.x]*vec[sccol[cs[tid]+(j*blockDim.x)+threadIdx.x]];
// sum2 += scval[cs[tid]+(j*2)+1]*vec[sccol[cs[tid]+(j*2)+1]];
}
__syncthreads();
result[tid*blockDim.x+threadIdx.x]=sum1;
// result[tid*c+1]=sum2;
}
__global__ void printmatscreen(int* mat, int N)
{
int i;
for (i=0;i<N;i++)
{
printf("%d ",mat[i]);
}
printf("\n");
}
int** Make2DIntArray(int arraySizeX, int arraySizeY)
{
int** theArray;
theArray = (int**) malloc(arraySizeX*sizeof(int*));
int i;
for (i = 0; i < arraySizeX; i++)
theArray[i] = (int*) malloc(arraySizeY*sizeof(int));
int j;
for (i=0;i<arraySizeX;i++)
{
for (j=0;j<arraySizeY;j++)
{
theArray[i][j]=0;
}
}
return theArray;
}
int** Make2DVariableIntArray(int rows, int blocks, int blocksize, int* columns)
{
int** theArray;
theArray = (int**) malloc(rows*sizeof(int*));
int i, j, k;
for (i = 0; i < blocks; i++)
{
k=columns[i];
for (j=0; j < blocksize; j++)
{
theArray[i*blocksize+j] = (int*) malloc(k*sizeof(int));
}
}
//int j;
for (i=0;i<blocks;i++)
{
for (j=0;j<blocksize;j++)
{
for (k=0;k<columns[i];k++)
{
theArray[i*blocksize+j][k]=0;
}
}
}
return theArray;
}
int** Changeto2DVariableIntArray(int** theArray,int rows, int blocks, int blocksize, int* columns)
{
int** NewArray=Make2DVariableIntArray(rows,blocks,blocksize,columns);
int i, j, k;
for (i=0;i<blocks;i++)
{
for (j=0;j<blocksize;j++)
{
for (k=0;k<columns[i];k++)
{
NewArray[i*blocksize+j][k]=theArray[i*blocksize+j][k];
}
}
}
printf("changed to multiple matrixes");
return NewArray;
}
void init_zeros(int** matrix, int N)
{
int i,j;
for (i=0;i<N;i++)
{
for (j=0;j<N;j++)
{
matrix[i][j]=0;
}
}
}
void printmat(int** matrix, int N, int Nj)
{
int i,j;
for (i=0;i<N;i++)
{
printf("\n");
for (j=0;j<N;j++)
{
printf("%d ",matrix[i][j]);
}
}
printf("\n");
}
void printtofile(int** matrix, int K, char* filename)
{
/*
Prints original 2D matrices to file
*/
FILE *fp;
fp=fopen(filename,"wt");
int i,j;
for (i=0;i<K;i++)
{
fprintf(fp, "\n");
for (j=0;j<K;j++)
{
fprintf(fp, "%d\t", matrix[i][j]);
}
}
}
void printtofile1D(int* matrix, int K, char* filename)
{
/*
Prints resultant matrix to a file
*/
FILE *fp;
fp=fopen(filename,"wt");
int i;
for (i=0;i<K;i++)
{
fprintf(fp, "%d\n", matrix[i]);
}
}
int* Make1DIntArray(int arraySizeX)
{
int* theArray;
theArray = (int*)malloc(arraySizeX*sizeof(int));
int i;
for (i=0;i<arraySizeX;i++)
{
theArray[i]=0;
}
return theArray;
}
void freese(int sizeX, int sizeY, double** ptr)
{
int i;
for (i=0;i<sizeX;i++)
free(ptr[i]);
free(ptr);
}
int main()
{
int N=5000;
// const int Dsize=1000;
FILE *arr, *vec;
int i,j,maxrowwidth=0,tint=0;
int** a=Make2DIntArray(N,N);
// int* val=Make1DIntArray(Dsize);
// int* col=Make1DIntArray(Dsize);
// int* row=Make1DIntArray(Dsize);
int* result=Make1DIntArray(N);
int* vecX=Make1DIntArray(N);
int** scval=Make2DIntArray(N,N); //sell c value
int** sccol=Make2DIntArray(N,N); //sell c col
int* rowwidth=Make1DIntArray(N); //number of elements in each row
int* temp=Make1DIntArray(N);
int* rows=Make1DIntArray(N);
int* resultsordered=Make1DIntArray(N);
int sig=4,c=2;
// int* rowwidth=Make1DIntArray(N);
int *dev_vec, *dev_scval, *dev_result, *dev_sccol, *dev_cols, *dev_cs;
//int val[10],col[10],row[10];
arr=fopen("matrix5000.txt","r");
int k=0;
// struct timeval start, end;
// gettimeofday(&start, NULL);
//Reading the vector
vec=fopen("vector5000.txt","r");
for (i=0;i<N;i++)
{
fscanf(vec,"%d",&vecX[i]);
rows[i]=i;
}
//Reading the matrix
for(i=0;i<N;i++)
{
//printf("\n");
for(j=0;j<N;j++)
{
fscanf(arr,"%d",&a[i][j]);
// printf("%d ",a[i][j]);
}
}
printf("\n");
//row[i]=k;
//printf("\n k = %d\n ", k);
//sleep(10);
// gettimeofday(&end, NULL);
// double delta = ((end.tv_sec - start.tv_sec) * 1000000u +
// end.tv_usec - start.tv_usec) / 1.e6;
// printf("\nTime spent=%f\n", delta);
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
{
if(a[i][j]!=0)
{
scval[i][k]=a[i][j];
sccol[i][k]=j;
rowwidth[i]=k+1;
if(rowwidth[i]>maxrowwidth)
{
maxrowwidth=rowwidth[i];
}k++;
}
}
//printf("\nRow width %d = %d", i, rowwidth[i]);
k=0;
}
if(sig>1&&c!=sig)
{
for(i=0;i<N;i=i+sig)
{
for(k=0;k<sig-1;k++)
{
for(j=i;(j<i+sig-1) && (j<N);j++)
{
if(rowwidth[j]<rowwidth[j+1])
{
temp=scval[j];
scval[j]=scval[j+1];
scval[j+1]=temp;
temp=sccol[j];
sccol[j]=sccol[j+1];
sccol[j+1]=temp;
tint=rowwidth[j];
rowwidth[j]=rowwidth[j+1];
rowwidth[j+1]=tint;
tint=rows[j];
rows[j]=rows[j+1];
rows[j+1]=tint;
}
}
}
}
}
/* for(i=0;i<N;i++)
{
if(scval[i][0]==0)
{
break;
}
}
N=i;
*/
printf("\nmaxrowwidth=%d\n",maxrowwidth);
// printmat(scval,N,N);
// printtofile(scval,N,"scval.txt");
// printtofile(sccol,N,"sccol.txt");
/* printf("\n Shuffled rows is:\n");
for (i=0;i<N;i++)
{
printf("%d\n",rows[i]);
}
*/
//printmatscreen<<<1,1>>>(dev_b,N);
/* multiply<<<N,N>>>(dev_a, dev_b, dev_c, N, N);
cudaMemcpy(result, dev_c, sizeof(int)*N, cudaMemcpyDeviceToHost);
for (i=0;i<N;i++)
{
printf("\n%d",result[i]);
}
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
// NEED TO FIGURE OUT A WAY TO POPULATE cols SO AS TO HAVE varmat CREATED PROPERLY. SYSTEM CRASHES OTHERWISE
*/
int* cols=Make1DIntArray(N/c);
j=0;
int colsum=0;
for(i=0;i<N;i=i+c)
{
cols[j]=rowwidth[i];
colsum+=cols[j];
j++;
}
int** varscval=Changeto2DVariableIntArray(scval,N,N/c,c,cols);
int** varsccol=Changeto2DVariableIntArray(sccol,N,N/c,c,cols);
/* for (i=0;i<N/c;i++)
{
for(j=0;j<c;j++)
{
printf("\n");
for (k=0;k<cols[i];k++)
{
printf("%d ",varscval[i*c+j][k]);
//printf("%d ",varsccol[i*c+j][k]);
}
}
}
*/
int varsize=colsum*c;
//flattening scval and sccol
int counters=0;
int* scval_flat=Make1DIntArray(varsize);
int* sccol_flat=Make1DIntArray(varsize);
int* cs=Make1DIntArray((N/c)+1);
cs[0]=0;
int countcols=0;
int z=0;
printf("\n");
printf("\n");
printf("\n");
for (i=0;i<N/c;i++)
{
countcols=0;
for(j=0;j<cols[i];j++)
{
for (k=0;k<c;k++)
{
scval_flat[counters]=varscval[i*c+k][j];
sccol_flat[counters]=varsccol[i*c+k][j];
//printf("%d ",scval_flat[counters]);
//printf("%d\n", sccol_flat[counters]);
counters=counters+1;
countcols=countcols+1;
}
}
cs[z+1]=cs[z]+countcols;
z=z+1;
}
/* printf("\ncs:");
for(i=1;i<(N/c)+1;i++)
printf("%d ", cs[i]);
/*
for(i=0;i<N/c;i++)
{
for(j=0;j<cols[i];j++)
{
result[i*c+0]+=scval_flat[cs[i]+(j*c)]*vecX[sccol_flat[cs[i]+(j*2)]];
result[i*c+1]+=scval_flat[cs[i]+(j*c)+1]*vecX[sccol_flat[cs[i]+(j*2)+1]];
}
}
printtofile1D(result,N,"resultstest.txt");
*/
cudaEvent_t start, stop, start_kernel, stop_kernel;
float time_kernel;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventCreate(&start_kernel);
cudaEventCreate(&stop_kernel);
cudaMalloc((void**)&dev_vec, sizeof(int)*N);
cudaMalloc((void**)&dev_scval, sizeof(int)*varsize);
cudaMalloc((void**)&dev_result, sizeof(int)*N);
cudaMalloc((void**)&dev_sccol, sizeof(int)*varsize);
cudaMalloc((void**)&dev_cols, sizeof(int)*(N/c));
cudaMalloc((void**)&dev_cs, sizeof(int)*(N/c));
//cudaEventRecord(start,0);
cudaMemcpy(dev_vec, vecX, sizeof(int)*N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_scval, scval_flat, sizeof(int)*varsize, cudaMemcpyHostToDevice);
cudaMemcpy(dev_result, result, sizeof(int)*N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_sccol, sccol_flat, sizeof(int)*varsize, cudaMemcpyHostToDevice);
cudaMemcpy(dev_cols, cols, sizeof(int)*(N/c), cudaMemcpyHostToDevice);
cudaMemcpy(dev_cs, cs, sizeof(int)*(N/c), cudaMemcpyHostToDevice);
cudaEventRecord(start_kernel,0);
multiply<<<N/c,c>>>(dev_scval, dev_sccol, dev_vec, dev_result, dev_cols, dev_cs);
//sleep(10);
cudaEventRecord(stop_kernel,0);
cudaMemcpy(result, dev_result, sizeof(int)*N, cudaMemcpyDeviceToHost);
//cudaEventRecord(stop,0);
cudaEventSynchronize(stop_kernel);
//cudaEventElapsedTime(&time, start, stop);
cudaEventElapsedTime(&time_kernel, start_kernel, stop_kernel);
//printf("\nTime for kernel with data transfer = %f ms \n", time);
printf("\nTime for kernel without data transfer = %f ms \n", time_kernel);
for (i=0;i<N;i++)
{
resultsordered[rows[i]]=result[i];
}
printtofile1D(resultsordered,N,"results.txt");
// CODE TO RESHUFFLE BACK
cudaFree(dev_vec);
cudaFree(dev_scval);
cudaFree(dev_result);
cudaFree(dev_sccol);
cudaFree(dev_cols);
return 0;
/*
cudaMalloc((void**)&dev_vec, sizeof(int)*N);
cudaMalloc((void**)&dev_scval_flat, sizeof(int)*varsize);
cudaMalloc((void**)&dev_result, sizeof(int)*N);
cudaMalloc((void**)&dev_sccol_flat, sizeof(int)*varsize);
cudaMemcpy(dev_a, vecX, sizeof(int)*N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, varscval, sizeof(int)*varsize, cudaMemcpyHostToDevice);
cudaMemcpy(dev_c, result, sizeof(int)*N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_d, varsccol, sizeof(int)*varsize, cudaMemcpyHostToDevice);
*/
} | .file "tmpxft_000fa3ee_00000000-6_SELL_1.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2082:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2082:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z14Make2DIntArrayii
.type _Z14Make2DIntArrayii, @function
_Z14Make2DIntArrayii:
.LFB2070:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $8, %rsp
.cfi_def_cfa_offset 64
movl %edi, %ebx
movl %esi, %r13d
movslq %edi, %rbp
salq $3, %rbp
movq %rbp, %rdi
call malloc@PLT
movq %rax, %r14
testl %ebx, %ebx
jle .L3
movslq %r13d, %r12
salq $2, %r12
movq %rax, %rbx
addq %rax, %rbp
movq %rax, %r15
.L5:
movq %r12, %rdi
call malloc@PLT
movq %rax, (%r15)
addq $8, %r15
cmpq %rbp, %r15
jne .L5
jmp .L6
.L8:
movq (%rbx), %rax
leaq (%r12,%rax), %rdx
.L7:
movl $0, (%rax)
addq $4, %rax
cmpq %rdx, %rax
jne .L7
.L9:
addq $8, %rbx
cmpq %rbp, %rbx
je .L3
.L6:
testl %r13d, %r13d
jg .L8
jmp .L9
.L3:
movq %r14, %rax
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2070:
.size _Z14Make2DIntArrayii, .-_Z14Make2DIntArrayii
.globl _Z22Make2DVariableIntArrayiiiPi
.type _Z22Make2DVariableIntArrayiiiPi, @function
_Z22Make2DVariableIntArrayiiiPi:
.LFB2071:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movl %esi, %ebx
movl %edx, %r14d
movq %rcx, %r15
movslq %edi, %rdi
salq $3, %rdi
call malloc@PLT
movq %rax, %r12
testl %ebx, %ebx
jle .L13
movq %r15, (%rsp)
movslq %ebx, %rbx
leaq (%r15,%rbx,4), %rax
movq %rax, 8(%rsp)
movl $0, 20(%rsp)
movslq %r14d, %rax
movq %rax, 24(%rsp)
.L17:
movl (%r15), %eax
testl %r14d, %r14d
jle .L15
cltq
leaq 0(,%rax,4), %rbp
movslq 20(%rsp), %rax
leaq (%r12,%rax,8), %rbx
movq 24(%rsp), %rsi
addq %rsi, %rax
leaq (%r12,%rax,8), %r13
.L16:
movq %rbp, %rdi
call malloc@PLT
movq %rax, (%rbx)
addq $8, %rbx
cmpq %r13, %rbx
jne .L16
.L15:
addq $4, %r15
addl %r14d, 20(%rsp)
movq 8(%rsp), %rax
cmpq %rax, %r15
jne .L17
movl %r14d, %esi
movl $0, %r9d
jmp .L18
.L21:
movslq %ecx, %rax
movq (%r12,%rax,8), %rax
leaq (%r8,%rax), %rdx
.L19:
movl $0, (%rax)
addq $4, %rax
cmpq %rax, %rdx
jne .L19
.L22:
addl $1, %ecx
cmpl %esi, %ecx
je .L20
.L23:
testl %edi, %edi
jg .L21
jmp .L22
.L20:
addl %r14d, %esi
addl %r14d, %r9d
addq $4, (%rsp)
movq (%rsp), %rax
movq 8(%rsp), %rdi
cmpq %rdi, %rax
je .L13
.L18:
testl %r14d, %r14d
jle .L20
movq (%rsp), %rax
movl (%rax), %edi
movslq %edi, %r8
salq $2, %r8
movl %r9d, %ecx
jmp .L23
.L13:
movq %r12, %rax
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2071:
.size _Z22Make2DVariableIntArrayiiiPi, .-_Z22Make2DVariableIntArrayiiiPi
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "changed to multiple matrixes"
.text
.globl _Z26Changeto2DVariableIntArrayPPiiiiS_
.type _Z26Changeto2DVariableIntArrayPPiiiiS_, @function
_Z26Changeto2DVariableIntArrayPPiiiiS_:
.LFB2072:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
movq %rdi, %rbp
movl %esi, %edi
movl %edx, %r13d
movl %ecx, %r12d
movq %r8, %r14
movq %r8, %rcx
movl %r12d, %edx
movl %r13d, %esi
call _Z22Make2DVariableIntArrayiiiPi
movq %rax, %rbx
testl %r13d, %r13d
jle .L29
movq %r14, %r10
movslq %r13d, %r13
leaq (%r14,%r13,4), %r14
movl %r12d, %r11d
movl $0, %r13d
jmp .L30
.L33:
movslq %r9d, %rsi
salq $3, %rsi
leaq 0(%rbp,%rsi), %r8
addq %rbx, %rsi
movl $0, %eax
.L31:
movq (%r8), %rdx
movl (%rdx,%rax,4), %ecx
movq (%rsi), %rdx
movl %ecx, (%rdx,%rax,4)
addq $1, %rax
cmpl %eax, (%rdi)
jg .L31
.L34:
addl $1, %r9d
cmpl %r9d, %r11d
je .L32
.L35:
movq %r10, %rdi
cmpl $0, (%r10)
jg .L33
jmp .L34
.L32:
addl %r12d, %r11d
addl %r12d, %r13d
addq $4, %r10
cmpq %r14, %r10
je .L29
.L30:
movl %r13d, %r9d
testl %r12d, %r12d
jg .L35
jmp .L32
.L29:
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbx, %rax
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2072:
.size _Z26Changeto2DVariableIntArrayPPiiiiS_, .-_Z26Changeto2DVariableIntArrayPPiiiiS_
.globl _Z10init_zerosPPii
.type _Z10init_zerosPPii, @function
_Z10init_zerosPPii:
.LFB2073:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L40
movq %rdi, %rcx
movslq %esi, %rsi
leaq (%rdi,%rsi,8), %rdi
salq $2, %rsi
.L42:
movl $0, %eax
.L43:
movq (%rcx), %rdx
movl $0, (%rdx,%rax)
addq $4, %rax
cmpq %rsi, %rax
jne .L43
addq $8, %rcx
cmpq %rdi, %rcx
jne .L42
.L40:
ret
.cfi_endproc
.LFE2073:
.size _Z10init_zerosPPii, .-_Z10init_zerosPPii
.section .rodata.str1.1
.LC1:
.string "\n"
.LC2:
.string "%d "
.text
.globl _Z8printmatPPiii
.type _Z8printmatPPiii, @function
_Z8printmatPPiii:
.LFB2074:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $8, %rsp
.cfi_def_cfa_offset 64
testl %esi, %esi
jle .L46
movq %rdi, %rbp
movslq %esi, %rsi
leaq (%rdi,%rsi,8), %r15
leaq 0(,%rsi,4), %r12
leaq .LC1(%rip), %r14
leaq .LC2(%rip), %r13
.L48:
movq %r14, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %ebx
.L47:
movq 0(%rbp), %rax
movl (%rax,%rbx), %edx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %r12, %rbx
jne .L47
addq $8, %rbp
cmpq %r15, %rbp
jne .L48
.L46:
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2074:
.size _Z8printmatPPiii, .-_Z8printmatPPiii
.section .rodata.str1.1
.LC3:
.string "wt"
.LC4:
.string "%d\t"
.text
.globl _Z11printtofilePPiiPc
.type _Z11printtofilePPiiPc, @function
_Z11printtofilePPiiPc:
.LFB2075:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $8, %rsp
.cfi_def_cfa_offset 64
movq %rdi, %r13
movl %esi, %ebx
movq %rdx, %rdi
leaq .LC3(%rip), %rsi
call fopen@PLT
testl %ebx, %ebx
jle .L52
movq %rax, %r12
movq %r13, %rbp
movslq %ebx, %rbx
leaq 0(%r13,%rbx,8), %r15
leaq 0(,%rbx,4), %r13
leaq .LC4(%rip), %r14
.L55:
leaq .LC1(%rip), %rdx
movl $2, %esi
movq %r12, %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $0, %ebx
.L54:
movq 0(%rbp), %rax
movl (%rax,%rbx), %ecx
movq %r14, %rdx
movl $2, %esi
movq %r12, %rdi
movl $0, %eax
call __fprintf_chk@PLT
addq $4, %rbx
cmpq %r13, %rbx
jne .L54
addq $8, %rbp
cmpq %r15, %rbp
jne .L55
.L52:
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2075:
.size _Z11printtofilePPiiPc, .-_Z11printtofilePPiiPc
.section .rodata.str1.1
.LC5:
.string "%d\n"
.text
.globl _Z13printtofile1DPiiPc
.type _Z13printtofile1DPiiPc, @function
_Z13printtofile1DPiiPc:
.LFB2076:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $8, %rsp
.cfi_def_cfa_offset 48
movq %rdi, %r13
movl %esi, %r12d
movq %rdx, %rdi
leaq .LC3(%rip), %rsi
call fopen@PLT
testl %r12d, %r12d
jle .L59
movq %rax, %rbp
movq %r13, %rbx
movslq %r12d, %r12
leaq 0(%r13,%r12,4), %r13
leaq .LC5(%rip), %r12
.L61:
movl (%rbx), %ecx
movq %r12, %rdx
movl $2, %esi
movq %rbp, %rdi
movl $0, %eax
call __fprintf_chk@PLT
addq $4, %rbx
cmpq %r13, %rbx
jne .L61
.L59:
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2076:
.size _Z13printtofile1DPiiPc, .-_Z13printtofile1DPiiPc
.globl _Z14Make1DIntArrayi
.type _Z14Make1DIntArrayi, @function
_Z14Make1DIntArrayi:
.LFB2077:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movl %edi, %ebp
movslq %edi, %rcx
leaq 0(,%rcx,4), %rbx
movq %rbx, %rdi
call malloc@PLT
testl %ebp, %ebp
jle .L64
movq %rax, %rdx
leaq (%rbx,%rax), %rcx
.L66:
movl $0, (%rdx)
addq $4, %rdx
cmpq %rcx, %rdx
jne .L66
.L64:
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2077:
.size _Z14Make1DIntArrayi, .-_Z14Make1DIntArrayi
.globl _Z6freeseiiPPd
.type _Z6freeseiiPPd, @function
_Z6freeseiiPPd:
.LFB2078:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movq %rdx, %r12
testl %edi, %edi
jle .L70
movq %rdx, %rbx
movslq %edi, %rdi
leaq (%rdx,%rdi,8), %rbp
.L71:
movq (%rbx), %rdi
call free@PLT
addq $8, %rbx
cmpq %rbp, %rbx
jne .L71
.L70:
movq %r12, %rdi
call free@PLT
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2078:
.size _Z6freeseiiPPd, .-_Z6freeseiiPPd
.globl _Z37__device_stub__Z8multiplyPiS_S_S_S_S_PiS_S_S_S_S_
.type _Z37__device_stub__Z8multiplyPiS_S_S_S_S_PiS_S_S_S_S_, @function
_Z37__device_stub__Z8multiplyPiS_S_S_S_S_PiS_S_S_S_S_:
.LFB2104:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movq %r8, 8(%rsp)
movq %r9, (%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
movq %rsp, %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L78
.L74:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L79
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L78:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z8multiplyPiS_S_S_S_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L74
.L79:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2104:
.size _Z37__device_stub__Z8multiplyPiS_S_S_S_S_PiS_S_S_S_S_, .-_Z37__device_stub__Z8multiplyPiS_S_S_S_S_PiS_S_S_S_S_
.globl _Z8multiplyPiS_S_S_S_S_
.type _Z8multiplyPiS_S_S_S_S_, @function
_Z8multiplyPiS_S_S_S_S_:
.LFB2105:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z37__device_stub__Z8multiplyPiS_S_S_S_S_PiS_S_S_S_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2105:
.size _Z8multiplyPiS_S_S_S_S_, .-_Z8multiplyPiS_S_S_S_S_
.section .rodata.str1.1
.LC6:
.string "r"
.LC7:
.string "matrix5000.txt"
.LC8:
.string "vector5000.txt"
.LC9:
.string "%d"
.LC10:
.string "\nmaxrowwidth=%d\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC11:
.string "\nTime for kernel without data transfer = %f ms \n"
.section .rodata.str1.1
.LC12:
.string "results.txt"
.text
.globl main
.type main, @function
main:
.LFB2079:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $200, %rsp
.cfi_def_cfa_offset 256
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
movl $5000, %esi
movl $5000, %edi
call _Z14Make2DIntArrayii
movq %rax, 24(%rsp)
movl $5000, %edi
call _Z14Make1DIntArrayi
movq %rax, 40(%rsp)
movl $5000, %edi
call _Z14Make1DIntArrayi
movq %rax, %r14
movq %rax, 64(%rsp)
movl $5000, %esi
movl $5000, %edi
call _Z14Make2DIntArrayii
movq %rax, %r12
movl $5000, %esi
movl $5000, %edi
call _Z14Make2DIntArrayii
movq %rax, 8(%rsp)
movl $5000, %edi
call _Z14Make1DIntArrayi
movq %rax, 16(%rsp)
movl $5000, %edi
call _Z14Make1DIntArrayi
movl $5000, %edi
call _Z14Make1DIntArrayi
movq %rax, 32(%rsp)
movl $5000, %edi
call _Z14Make1DIntArrayi
movq %rax, 48(%rsp)
leaq .LC6(%rip), %r13
movq %r13, %rsi
leaq .LC7(%rip), %rdi
call fopen@PLT
movq %rax, %rbx
movq %r13, %rsi
leaq .LC8(%rip), %rdi
call fopen@PLT
movq %rax, %r15
movl $0, %r13d
leaq .LC9(%rip), %rbp
movq %r12, 56(%rsp)
movq 32(%rsp), %r12
.L83:
movq %r14, %rdx
movq %rbp, %rsi
movq %r15, %rdi
movl $0, %eax
call __isoc23_fscanf@PLT
movl %r13d, (%r12,%r13,4)
addq $1, %r13
addq $4, %r14
cmpq $5000, %r13
jne .L83
movq 24(%rsp), %rax
movq %rax, %r14
leaq 40000(%rax), %rsi
movq %rsi, 24(%rsp)
leaq .LC9(%rip), %r15
movq %rax, %r12
movq %rsi, %rbp
.L84:
movl $0, %r13d
.L85:
movq %r13, %rdx
addq (%r12), %rdx
movq %r15, %rsi
movq %rbx, %rdi
movl $0, %eax
call __isoc23_fscanf@PLT
addq $4, %r13
cmpq $20000, %r13
jne .L85
addq $8, %r12
cmpq %rbp, %r12
jne .L84
movq 56(%rsp), %r12
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %r12, %r11
movq 8(%rsp), %r10
movq 16(%rsp), %r9
movq %r9, %rax
movl $0, %esi
movq 24(%rsp), %r8
jmp .L87
.L88:
addq $1, %rdx
cmpq $5000, %rdx
je .L118
.L89:
movq (%r14), %rdi
movl (%rdi,%rdx,4), %edi
testl %edi, %edi
je .L88
movslq %ecx, %rbx
movq (%r11), %rbp
movl %edi, 0(%rbp,%rbx,4)
movq (%r10), %rdi
movl %edx, (%rdi,%rbx,4)
addl $1, %ecx
movl %ecx, (%r9)
cmpl %ecx, %esi
cmovl %ecx, %esi
jmp .L88
.L118:
addq $8, %r14
addq $8, %r11
addq $8, %r10
addq $4, %r9
cmpq %r8, %r14
je .L107
.L87:
movl $0, %edx
movl $0, %ecx
jmp .L89
.L91:
movl %edx, %edi
addq $1, %rdx
addq $4, %rcx
cmpl $4999, %edi
jg .L109
cmpl %r9d, %edi
jge .L109
.L92:
movl 4(%rcx), %edi
cmpl %edi, (%rcx)
jge .L91
movq -8(%r12,%rdx,8), %rdi
movq (%r12,%rdx,8), %rbp
movq %rbp, -8(%r12,%rdx,8)
movq %rdi, (%r12,%rdx,8)
movq -8(%r8,%rdx,8), %rdi
movq (%r8,%rdx,8), %rbp
movq %rbp, -8(%r8,%rdx,8)
movq %rdi, (%r8,%rdx,8)
movl (%rcx), %edi
movl 4(%rcx), %ebp
movl %ebp, (%rcx)
movl %edi, 4(%rcx)
movl -4(%r11,%rdx,4), %edi
movl (%r11,%rdx,4), %ebp
movl %ebp, -4(%r11,%rdx,4)
movl %edi, (%r11,%rdx,4)
jmp .L91
.L109:
subl $1, %ebx
je .L94
.L96:
movq %rax, %rcx
movq %r10, %rdx
jmp .L92
.L94:
addl $4, %r9d
addq $4, %r10
addq $16, %rax
cmpq $5001, %r10
je .L119
.L90:
movl $3, %ebx
jmp .L96
.L107:
movl $1, %r10d
movl $3, %r9d
movq 8(%rsp), %r8
movq 32(%rsp), %r11
jmp .L90
.L119:
movl %esi, %edx
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $2500, %edi
call _Z14Make1DIntArrayi
movq %rax, %r15
movl $0, %eax
movl $0, %ebx
movq 16(%rsp), %rcx
.L97:
movl (%rcx,%rax,8), %edx
movl %edx, (%r15,%rax,4)
addl %edx, %ebx
addq $1, %rax
cmpq $2500, %rax
jne .L97
movq %r15, %r8
movl $2, %ecx
movl $2500, %edx
movl $5000, %esi
movq %r12, %rdi
call _Z26Changeto2DVariableIntArrayPPiiiiS_
movq %rax, %r12
movq %r15, %r8
movl $2, %ecx
movl $2500, %edx
movl $5000, %esi
movq 8(%rsp), %rdi
call _Z26Changeto2DVariableIntArrayPPiiiiS_
movq %rax, %r13
addl %ebx, %ebx
movl %ebx, 76(%rsp)
movl %ebx, %edi
call _Z14Make1DIntArrayi
movq %rax, %rbp
movl %ebx, %edi
call _Z14Make1DIntArrayi
movq %rax, %rbx
movl $2501, %edi
call _Z14Make1DIntArrayi
movq %rax, 56(%rsp)
movl $0, (%rax)
leaq .LC1(%rip), %r14
movq %r14, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %r14, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %r14, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 56(%rsp), %rax
leaq 4(%rax), %rsi
movl $0, %r10d
movl $0, %r9d
movl $0, %r14d
movq %r15, %rax
movq %r15, %r11
jmp .L98
.L120:
addl $2, %r9d
addq $1, %r8
leaq 2(%rcx), %rax
cmpl %r8d, (%r15)
jle .L100
movq %rax, %rcx
.L103:
leaq 0(,%r8,4), %rsi
leaq -2(%rcx), %rax
movq %r10, %rdx
.L99:
movq (%r12,%rdx), %rdi
movl (%rdi,%rsi), %edi
movl %edi, 0(%rbp,%rax,4)
movq 0(%r13,%rdx), %rdi
movl (%rdi,%rsi), %edi
movl %edi, (%rbx,%rax,4)
addq $1, %rax
addq $8, %rdx
cmpq %rax, %rcx
jne .L99
jmp .L120
.L100:
movq 8(%rsp), %rsi
movq 16(%rsp), %rdi
movq 24(%rsp), %rax
subl %edi, %ecx
.L102:
addl -4(%rsi), %ecx
movl %ecx, (%rsi)
addq $4, %rax
addq $16, %r10
addq $4, %rsi
cmpq $40000, %r10
je .L101
.L98:
movq %rax, %r15
cmpl $0, (%rax)
jle .L108
movslq %r9d, %rdi
leaq 2(%rdi), %rcx
movl $0, %r8d
movq %rsi, 8(%rsp)
movq %rdi, 16(%rsp)
movq %rax, 24(%rsp)
jmp .L103
.L108:
movl %r14d, %ecx
jmp .L102
.L101:
movq %r11, %r15
leaq 128(%rsp), %rdi
call cudaEventCreate@PLT
leaq 136(%rsp), %rdi
call cudaEventCreate@PLT
leaq 144(%rsp), %rdi
call cudaEventCreate@PLT
leaq 152(%rsp), %rdi
call cudaEventCreate@PLT
leaq 80(%rsp), %rdi
movl $20000, %esi
call cudaMalloc@PLT
movslq 76(%rsp), %r12
salq $2, %r12
leaq 88(%rsp), %rdi
movq %r12, %rsi
call cudaMalloc@PLT
leaq 96(%rsp), %rdi
movl $20000, %esi
call cudaMalloc@PLT
leaq 104(%rsp), %rdi
movq %r12, %rsi
call cudaMalloc@PLT
leaq 112(%rsp), %rdi
movl $10000, %esi
call cudaMalloc@PLT
leaq 120(%rsp), %rdi
movl $10000, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $20000, %edx
movq 64(%rsp), %rsi
movq 80(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %r12, %rdx
movq %rbp, %rsi
movq 88(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $20000, %edx
movq 40(%rsp), %rsi
movq 96(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %r12, %rdx
movq %rbx, %rsi
movq 104(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $10000, %edx
movq %r15, %rsi
movq 112(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $10000, %edx
movq 56(%rsp), %rsi
movq 120(%rsp), %rdi
call cudaMemcpy@PLT
movl $0, %esi
movq 144(%rsp), %rdi
call cudaEventRecord@PLT
movl $2, 172(%rsp)
movl $1, 176(%rsp)
movl $1, 180(%rsp)
movl $2500, 160(%rsp)
movl $1, 164(%rsp)
movl $1, 168(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 172(%rsp), %rdx
movl $1, %ecx
movq 160(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L121
.L104:
movl $0, %esi
movq 152(%rsp), %rdi
call cudaEventRecord@PLT
movl $2, %ecx
movl $20000, %edx
movq 96(%rsp), %rsi
movq 40(%rsp), %rbx
movq %rbx, %rdi
call cudaMemcpy@PLT
movq 152(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 172(%rsp), %rdi
movq 152(%rsp), %rdx
movq 144(%rsp), %rsi
call cudaEventElapsedTime@PLT
pxor %xmm0, %xmm0
cvtss2sd 172(%rsp), %xmm0
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl $0, %eax
movq %rbx, %rsi
movq 32(%rsp), %rdi
movq 48(%rsp), %r8
.L105:
movl (%rsi,%rax), %ecx
movslq (%rdi,%rax), %rdx
movl %ecx, (%r8,%rdx,4)
addq $4, %rax
cmpq $20000, %rax
jne .L105
leaq .LC12(%rip), %rdx
movl $5000, %esi
movq 48(%rsp), %rdi
call _Z13printtofile1DPiiPc
movq 80(%rsp), %rdi
call cudaFree@PLT
movq 88(%rsp), %rdi
call cudaFree@PLT
movq 96(%rsp), %rdi
call cudaFree@PLT
movq 104(%rsp), %rdi
call cudaFree@PLT
movq 112(%rsp), %rdi
call cudaFree@PLT
movq 184(%rsp), %rax
subq %fs:40, %rax
jne .L122
movl $0, %eax
addq $200, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L121:
.cfi_restore_state
movq 120(%rsp), %r9
movq 112(%rsp), %r8
movq 96(%rsp), %rcx
movq 80(%rsp), %rdx
movq 104(%rsp), %rsi
movq 88(%rsp), %rdi
call _Z37__device_stub__Z8multiplyPiS_S_S_S_S_PiS_S_S_S_S_
jmp .L104
.L122:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2079:
.size main, .-main
.globl _Z35__device_stub__Z14printmatscreenPiiPii
.type _Z35__device_stub__Z14printmatscreenPiiPii, @function
_Z35__device_stub__Z14printmatscreenPiiPii:
.LFB2106:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L127
.L123:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L128
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L127:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z14printmatscreenPii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L123
.L128:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2106:
.size _Z35__device_stub__Z14printmatscreenPiiPii, .-_Z35__device_stub__Z14printmatscreenPiiPii
.globl _Z14printmatscreenPii
.type _Z14printmatscreenPii, @function
_Z14printmatscreenPii:
.LFB2107:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z14printmatscreenPiiPii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2107:
.size _Z14printmatscreenPii, .-_Z14printmatscreenPii
.section .rodata.str1.1
.LC13:
.string "_Z14printmatscreenPii"
.LC14:
.string "_Z8multiplyPiS_S_S_S_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2109:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC13(%rip), %rdx
movq %rdx, %rcx
leaq _Z14printmatscreenPii(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC14(%rip), %rdx
movq %rdx, %rcx
leaq _Z8multiplyPiS_S_S_S_S_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2109:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include<stdio.h>
#include<math.h>
#include<stdlib.h>
//#include<cuda.h>
#include<unistd.h>
#include<time.h>
/*
for(i=0;i<N/c;i++)
{
for(j=0;j<cols[i];j++)
{
result[i*c+0]+=scval_flat[cs[i]+(j*c)]*vecX[sccol_flat[cs[i]+(j*2)]];
result[i*c+1]+=scval_flat[cs[i]+(j*c)+1]*vecX[sccol_flat[cs[i]+(j*2)+1]];
}
}
*/
__global__ void multiply(int *scval, int *sccol, int *vec, int *result, int *cols, int *cs)
{
int tid=blockIdx.x;
int sum1=0;
int j;
//int colidx=tid/2;
//printf("\n tid= %d", tid);
//printf("\n Writing to %d",tid*c+threadIdx.x);
for(j=0;j<cols[tid];j++)
{
sum1 += scval[cs[tid]+(j*blockDim.x)+threadIdx.x]*vec[sccol[cs[tid]+(j*blockDim.x)+threadIdx.x]];
// sum2 += scval[cs[tid]+(j*2)+1]*vec[sccol[cs[tid]+(j*2)+1]];
}
__syncthreads();
result[tid*blockDim.x+threadIdx.x]=sum1;
// result[tid*c+1]=sum2;
}
__global__ void printmatscreen(int* mat, int N)
{
int i;
for (i=0;i<N;i++)
{
printf("%d ",mat[i]);
}
printf("\n");
}
int** Make2DIntArray(int arraySizeX, int arraySizeY)
{
int** theArray;
theArray = (int**) malloc(arraySizeX*sizeof(int*));
int i;
for (i = 0; i < arraySizeX; i++)
theArray[i] = (int*) malloc(arraySizeY*sizeof(int));
int j;
for (i=0;i<arraySizeX;i++)
{
for (j=0;j<arraySizeY;j++)
{
theArray[i][j]=0;
}
}
return theArray;
}
int** Make2DVariableIntArray(int rows, int blocks, int blocksize, int* columns)
{
int** theArray;
theArray = (int**) malloc(rows*sizeof(int*));
int i, j, k;
for (i = 0; i < blocks; i++)
{
k=columns[i];
for (j=0; j < blocksize; j++)
{
theArray[i*blocksize+j] = (int*) malloc(k*sizeof(int));
}
}
//int j;
for (i=0;i<blocks;i++)
{
for (j=0;j<blocksize;j++)
{
for (k=0;k<columns[i];k++)
{
theArray[i*blocksize+j][k]=0;
}
}
}
return theArray;
}
int** Changeto2DVariableIntArray(int** theArray,int rows, int blocks, int blocksize, int* columns)
{
int** NewArray=Make2DVariableIntArray(rows,blocks,blocksize,columns);
int i, j, k;
for (i=0;i<blocks;i++)
{
for (j=0;j<blocksize;j++)
{
for (k=0;k<columns[i];k++)
{
NewArray[i*blocksize+j][k]=theArray[i*blocksize+j][k];
}
}
}
printf("changed to multiple matrixes");
return NewArray;
}
void init_zeros(int** matrix, int N)
{
int i,j;
for (i=0;i<N;i++)
{
for (j=0;j<N;j++)
{
matrix[i][j]=0;
}
}
}
void printmat(int** matrix, int N, int Nj)
{
int i,j;
for (i=0;i<N;i++)
{
printf("\n");
for (j=0;j<N;j++)
{
printf("%d ",matrix[i][j]);
}
}
printf("\n");
}
void printtofile(int** matrix, int K, char* filename)
{
/*
Prints original 2D matrices to file
*/
FILE *fp;
fp=fopen(filename,"wt");
int i,j;
for (i=0;i<K;i++)
{
fprintf(fp, "\n");
for (j=0;j<K;j++)
{
fprintf(fp, "%d\t", matrix[i][j]);
}
}
}
void printtofile1D(int* matrix, int K, char* filename)
{
/*
Prints resultant matrix to a file
*/
FILE *fp;
fp=fopen(filename,"wt");
int i;
for (i=0;i<K;i++)
{
fprintf(fp, "%d\n", matrix[i]);
}
}
int* Make1DIntArray(int arraySizeX)
{
int* theArray;
theArray = (int*)malloc(arraySizeX*sizeof(int));
int i;
for (i=0;i<arraySizeX;i++)
{
theArray[i]=0;
}
return theArray;
}
void freese(int sizeX, int sizeY, double** ptr)
{
int i;
for (i=0;i<sizeX;i++)
free(ptr[i]);
free(ptr);
}
int main()
{
int N=5000;
// const int Dsize=1000;
FILE *arr, *vec;
int i,j,maxrowwidth=0,tint=0;
int** a=Make2DIntArray(N,N);
// int* val=Make1DIntArray(Dsize);
// int* col=Make1DIntArray(Dsize);
// int* row=Make1DIntArray(Dsize);
int* result=Make1DIntArray(N);
int* vecX=Make1DIntArray(N);
int** scval=Make2DIntArray(N,N); //sell c value
int** sccol=Make2DIntArray(N,N); //sell c col
int* rowwidth=Make1DIntArray(N); //number of elements in each row
int* temp=Make1DIntArray(N);
int* rows=Make1DIntArray(N);
int* resultsordered=Make1DIntArray(N);
int sig=4,c=2;
// int* rowwidth=Make1DIntArray(N);
int *dev_vec, *dev_scval, *dev_result, *dev_sccol, *dev_cols, *dev_cs;
//int val[10],col[10],row[10];
arr=fopen("matrix5000.txt","r");
int k=0;
// struct timeval start, end;
// gettimeofday(&start, NULL);
//Reading the vector
vec=fopen("vector5000.txt","r");
for (i=0;i<N;i++)
{
fscanf(vec,"%d",&vecX[i]);
rows[i]=i;
}
//Reading the matrix
for(i=0;i<N;i++)
{
//printf("\n");
for(j=0;j<N;j++)
{
fscanf(arr,"%d",&a[i][j]);
// printf("%d ",a[i][j]);
}
}
printf("\n");
//row[i]=k;
//printf("\n k = %d\n ", k);
//sleep(10);
// gettimeofday(&end, NULL);
// double delta = ((end.tv_sec - start.tv_sec) * 1000000u +
// end.tv_usec - start.tv_usec) / 1.e6;
// printf("\nTime spent=%f\n", delta);
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
{
if(a[i][j]!=0)
{
scval[i][k]=a[i][j];
sccol[i][k]=j;
rowwidth[i]=k+1;
if(rowwidth[i]>maxrowwidth)
{
maxrowwidth=rowwidth[i];
}k++;
}
}
//printf("\nRow width %d = %d", i, rowwidth[i]);
k=0;
}
if(sig>1&&c!=sig)
{
for(i=0;i<N;i=i+sig)
{
for(k=0;k<sig-1;k++)
{
for(j=i;(j<i+sig-1) && (j<N);j++)
{
if(rowwidth[j]<rowwidth[j+1])
{
temp=scval[j];
scval[j]=scval[j+1];
scval[j+1]=temp;
temp=sccol[j];
sccol[j]=sccol[j+1];
sccol[j+1]=temp;
tint=rowwidth[j];
rowwidth[j]=rowwidth[j+1];
rowwidth[j+1]=tint;
tint=rows[j];
rows[j]=rows[j+1];
rows[j+1]=tint;
}
}
}
}
}
/* for(i=0;i<N;i++)
{
if(scval[i][0]==0)
{
break;
}
}
N=i;
*/
printf("\nmaxrowwidth=%d\n",maxrowwidth);
// printmat(scval,N,N);
// printtofile(scval,N,"scval.txt");
// printtofile(sccol,N,"sccol.txt");
/* printf("\n Shuffled rows is:\n");
for (i=0;i<N;i++)
{
printf("%d\n",rows[i]);
}
*/
//printmatscreen<<<1,1>>>(dev_b,N);
/* multiply<<<N,N>>>(dev_a, dev_b, dev_c, N, N);
cudaMemcpy(result, dev_c, sizeof(int)*N, cudaMemcpyDeviceToHost);
for (i=0;i<N;i++)
{
printf("\n%d",result[i]);
}
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
// NEED TO FIGURE OUT A WAY TO POPULATE cols SO AS TO HAVE varmat CREATED PROPERLY. SYSTEM CRASHES OTHERWISE
*/
int* cols=Make1DIntArray(N/c);
j=0;
int colsum=0;
for(i=0;i<N;i=i+c)
{
cols[j]=rowwidth[i];
colsum+=cols[j];
j++;
}
int** varscval=Changeto2DVariableIntArray(scval,N,N/c,c,cols);
int** varsccol=Changeto2DVariableIntArray(sccol,N,N/c,c,cols);
/* for (i=0;i<N/c;i++)
{
for(j=0;j<c;j++)
{
printf("\n");
for (k=0;k<cols[i];k++)
{
printf("%d ",varscval[i*c+j][k]);
//printf("%d ",varsccol[i*c+j][k]);
}
}
}
*/
int varsize=colsum*c;
//flattening scval and sccol
int counters=0;
int* scval_flat=Make1DIntArray(varsize);
int* sccol_flat=Make1DIntArray(varsize);
int* cs=Make1DIntArray((N/c)+1);
cs[0]=0;
int countcols=0;
int z=0;
printf("\n");
printf("\n");
printf("\n");
for (i=0;i<N/c;i++)
{
countcols=0;
for(j=0;j<cols[i];j++)
{
for (k=0;k<c;k++)
{
scval_flat[counters]=varscval[i*c+k][j];
sccol_flat[counters]=varsccol[i*c+k][j];
//printf("%d ",scval_flat[counters]);
//printf("%d\n", sccol_flat[counters]);
counters=counters+1;
countcols=countcols+1;
}
}
cs[z+1]=cs[z]+countcols;
z=z+1;
}
/* printf("\ncs:");
for(i=1;i<(N/c)+1;i++)
printf("%d ", cs[i]);
/*
for(i=0;i<N/c;i++)
{
for(j=0;j<cols[i];j++)
{
result[i*c+0]+=scval_flat[cs[i]+(j*c)]*vecX[sccol_flat[cs[i]+(j*2)]];
result[i*c+1]+=scval_flat[cs[i]+(j*c)+1]*vecX[sccol_flat[cs[i]+(j*2)+1]];
}
}
printtofile1D(result,N,"resultstest.txt");
*/
cudaEvent_t start, stop, start_kernel, stop_kernel;
float time_kernel;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventCreate(&start_kernel);
cudaEventCreate(&stop_kernel);
cudaMalloc((void**)&dev_vec, sizeof(int)*N);
cudaMalloc((void**)&dev_scval, sizeof(int)*varsize);
cudaMalloc((void**)&dev_result, sizeof(int)*N);
cudaMalloc((void**)&dev_sccol, sizeof(int)*varsize);
cudaMalloc((void**)&dev_cols, sizeof(int)*(N/c));
cudaMalloc((void**)&dev_cs, sizeof(int)*(N/c));
//cudaEventRecord(start,0);
cudaMemcpy(dev_vec, vecX, sizeof(int)*N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_scval, scval_flat, sizeof(int)*varsize, cudaMemcpyHostToDevice);
cudaMemcpy(dev_result, result, sizeof(int)*N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_sccol, sccol_flat, sizeof(int)*varsize, cudaMemcpyHostToDevice);
cudaMemcpy(dev_cols, cols, sizeof(int)*(N/c), cudaMemcpyHostToDevice);
cudaMemcpy(dev_cs, cs, sizeof(int)*(N/c), cudaMemcpyHostToDevice);
cudaEventRecord(start_kernel,0);
multiply<<<N/c,c>>>(dev_scval, dev_sccol, dev_vec, dev_result, dev_cols, dev_cs);
//sleep(10);
cudaEventRecord(stop_kernel,0);
cudaMemcpy(result, dev_result, sizeof(int)*N, cudaMemcpyDeviceToHost);
//cudaEventRecord(stop,0);
cudaEventSynchronize(stop_kernel);
//cudaEventElapsedTime(&time, start, stop);
cudaEventElapsedTime(&time_kernel, start_kernel, stop_kernel);
//printf("\nTime for kernel with data transfer = %f ms \n", time);
printf("\nTime for kernel without data transfer = %f ms \n", time_kernel);
for (i=0;i<N;i++)
{
resultsordered[rows[i]]=result[i];
}
printtofile1D(resultsordered,N,"results.txt");
// CODE TO RESHUFFLE BACK
cudaFree(dev_vec);
cudaFree(dev_scval);
cudaFree(dev_result);
cudaFree(dev_sccol);
cudaFree(dev_cols);
return 0;
/*
cudaMalloc((void**)&dev_vec, sizeof(int)*N);
cudaMalloc((void**)&dev_scval_flat, sizeof(int)*varsize);
cudaMalloc((void**)&dev_result, sizeof(int)*N);
cudaMalloc((void**)&dev_sccol_flat, sizeof(int)*varsize);
cudaMemcpy(dev_a, vecX, sizeof(int)*N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, varscval, sizeof(int)*varsize, cudaMemcpyHostToDevice);
cudaMemcpy(dev_c, result, sizeof(int)*N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_d, varsccol, sizeof(int)*varsize, cudaMemcpyHostToDevice);
*/
} | #include <hip/hip_runtime.h>
#include<stdio.h>
#include<math.h>
#include<stdlib.h>
//#include<cuda.h>
#include<unistd.h>
#include<time.h>
/*
for(i=0;i<N/c;i++)
{
for(j=0;j<cols[i];j++)
{
result[i*c+0]+=scval_flat[cs[i]+(j*c)]*vecX[sccol_flat[cs[i]+(j*2)]];
result[i*c+1]+=scval_flat[cs[i]+(j*c)+1]*vecX[sccol_flat[cs[i]+(j*2)+1]];
}
}
*/
__global__ void multiply(int *scval, int *sccol, int *vec, int *result, int *cols, int *cs)
{
int tid=blockIdx.x;
int sum1=0;
int j;
//int colidx=tid/2;
//printf("\n tid= %d", tid);
//printf("\n Writing to %d",tid*c+threadIdx.x);
for(j=0;j<cols[tid];j++)
{
sum1 += scval[cs[tid]+(j*blockDim.x)+threadIdx.x]*vec[sccol[cs[tid]+(j*blockDim.x)+threadIdx.x]];
// sum2 += scval[cs[tid]+(j*2)+1]*vec[sccol[cs[tid]+(j*2)+1]];
}
__syncthreads();
result[tid*blockDim.x+threadIdx.x]=sum1;
// result[tid*c+1]=sum2;
}
__global__ void printmatscreen(int* mat, int N)
{
int i;
for (i=0;i<N;i++)
{
printf("%d ",mat[i]);
}
printf("\n");
}
int** Make2DIntArray(int arraySizeX, int arraySizeY)
{
int** theArray;
theArray = (int**) malloc(arraySizeX*sizeof(int*));
int i;
for (i = 0; i < arraySizeX; i++)
theArray[i] = (int*) malloc(arraySizeY*sizeof(int));
int j;
for (i=0;i<arraySizeX;i++)
{
for (j=0;j<arraySizeY;j++)
{
theArray[i][j]=0;
}
}
return theArray;
}
int** Make2DVariableIntArray(int rows, int blocks, int blocksize, int* columns)
{
int** theArray;
theArray = (int**) malloc(rows*sizeof(int*));
int i, j, k;
for (i = 0; i < blocks; i++)
{
k=columns[i];
for (j=0; j < blocksize; j++)
{
theArray[i*blocksize+j] = (int*) malloc(k*sizeof(int));
}
}
//int j;
for (i=0;i<blocks;i++)
{
for (j=0;j<blocksize;j++)
{
for (k=0;k<columns[i];k++)
{
theArray[i*blocksize+j][k]=0;
}
}
}
return theArray;
}
int** Changeto2DVariableIntArray(int** theArray,int rows, int blocks, int blocksize, int* columns)
{
int** NewArray=Make2DVariableIntArray(rows,blocks,blocksize,columns);
int i, j, k;
for (i=0;i<blocks;i++)
{
for (j=0;j<blocksize;j++)
{
for (k=0;k<columns[i];k++)
{
NewArray[i*blocksize+j][k]=theArray[i*blocksize+j][k];
}
}
}
printf("changed to multiple matrixes");
return NewArray;
}
void init_zeros(int** matrix, int N)
{
int i,j;
for (i=0;i<N;i++)
{
for (j=0;j<N;j++)
{
matrix[i][j]=0;
}
}
}
void printmat(int** matrix, int N, int Nj)
{
int i,j;
for (i=0;i<N;i++)
{
printf("\n");
for (j=0;j<N;j++)
{
printf("%d ",matrix[i][j]);
}
}
printf("\n");
}
void printtofile(int** matrix, int K, char* filename)
{
/*
Prints original 2D matrices to file
*/
FILE *fp;
fp=fopen(filename,"wt");
int i,j;
for (i=0;i<K;i++)
{
fprintf(fp, "\n");
for (j=0;j<K;j++)
{
fprintf(fp, "%d\t", matrix[i][j]);
}
}
}
void printtofile1D(int* matrix, int K, char* filename)
{
/*
Prints resultant matrix to a file
*/
FILE *fp;
fp=fopen(filename,"wt");
int i;
for (i=0;i<K;i++)
{
fprintf(fp, "%d\n", matrix[i]);
}
}
int* Make1DIntArray(int arraySizeX)
{
int* theArray;
theArray = (int*)malloc(arraySizeX*sizeof(int));
int i;
for (i=0;i<arraySizeX;i++)
{
theArray[i]=0;
}
return theArray;
}
void freese(int sizeX, int sizeY, double** ptr)
{
int i;
for (i=0;i<sizeX;i++)
free(ptr[i]);
free(ptr);
}
int main()
{
int N=5000;
// const int Dsize=1000;
FILE *arr, *vec;
int i,j,maxrowwidth=0,tint=0;
int** a=Make2DIntArray(N,N);
// int* val=Make1DIntArray(Dsize);
// int* col=Make1DIntArray(Dsize);
// int* row=Make1DIntArray(Dsize);
int* result=Make1DIntArray(N);
int* vecX=Make1DIntArray(N);
int** scval=Make2DIntArray(N,N); //sell c value
int** sccol=Make2DIntArray(N,N); //sell c col
int* rowwidth=Make1DIntArray(N); //number of elements in each row
int* temp=Make1DIntArray(N);
int* rows=Make1DIntArray(N);
int* resultsordered=Make1DIntArray(N);
int sig=4,c=2;
// int* rowwidth=Make1DIntArray(N);
int *dev_vec, *dev_scval, *dev_result, *dev_sccol, *dev_cols, *dev_cs;
//int val[10],col[10],row[10];
arr=fopen("matrix5000.txt","r");
int k=0;
// struct timeval start, end;
// gettimeofday(&start, NULL);
//Reading the vector
vec=fopen("vector5000.txt","r");
for (i=0;i<N;i++)
{
fscanf(vec,"%d",&vecX[i]);
rows[i]=i;
}
//Reading the matrix
for(i=0;i<N;i++)
{
//printf("\n");
for(j=0;j<N;j++)
{
fscanf(arr,"%d",&a[i][j]);
// printf("%d ",a[i][j]);
}
}
printf("\n");
//row[i]=k;
//printf("\n k = %d\n ", k);
//sleep(10);
// gettimeofday(&end, NULL);
// double delta = ((end.tv_sec - start.tv_sec) * 1000000u +
// end.tv_usec - start.tv_usec) / 1.e6;
// printf("\nTime spent=%f\n", delta);
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
{
if(a[i][j]!=0)
{
scval[i][k]=a[i][j];
sccol[i][k]=j;
rowwidth[i]=k+1;
if(rowwidth[i]>maxrowwidth)
{
maxrowwidth=rowwidth[i];
}k++;
}
}
//printf("\nRow width %d = %d", i, rowwidth[i]);
k=0;
}
if(sig>1&&c!=sig)
{
for(i=0;i<N;i=i+sig)
{
for(k=0;k<sig-1;k++)
{
for(j=i;(j<i+sig-1) && (j<N);j++)
{
if(rowwidth[j]<rowwidth[j+1])
{
temp=scval[j];
scval[j]=scval[j+1];
scval[j+1]=temp;
temp=sccol[j];
sccol[j]=sccol[j+1];
sccol[j+1]=temp;
tint=rowwidth[j];
rowwidth[j]=rowwidth[j+1];
rowwidth[j+1]=tint;
tint=rows[j];
rows[j]=rows[j+1];
rows[j+1]=tint;
}
}
}
}
}
/* for(i=0;i<N;i++)
{
if(scval[i][0]==0)
{
break;
}
}
N=i;
*/
printf("\nmaxrowwidth=%d\n",maxrowwidth);
// printmat(scval,N,N);
// printtofile(scval,N,"scval.txt");
// printtofile(sccol,N,"sccol.txt");
/* printf("\n Shuffled rows is:\n");
for (i=0;i<N;i++)
{
printf("%d\n",rows[i]);
}
*/
//printmatscreen<<<1,1>>>(dev_b,N);
/* multiply<<<N,N>>>(dev_a, dev_b, dev_c, N, N);
cudaMemcpy(result, dev_c, sizeof(int)*N, cudaMemcpyDeviceToHost);
for (i=0;i<N;i++)
{
printf("\n%d",result[i]);
}
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
// NEED TO FIGURE OUT A WAY TO POPULATE cols SO AS TO HAVE varmat CREATED PROPERLY. SYSTEM CRASHES OTHERWISE
*/
int* cols=Make1DIntArray(N/c);
j=0;
int colsum=0;
for(i=0;i<N;i=i+c)
{
cols[j]=rowwidth[i];
colsum+=cols[j];
j++;
}
int** varscval=Changeto2DVariableIntArray(scval,N,N/c,c,cols);
int** varsccol=Changeto2DVariableIntArray(sccol,N,N/c,c,cols);
/* for (i=0;i<N/c;i++)
{
for(j=0;j<c;j++)
{
printf("\n");
for (k=0;k<cols[i];k++)
{
printf("%d ",varscval[i*c+j][k]);
//printf("%d ",varsccol[i*c+j][k]);
}
}
}
*/
int varsize=colsum*c;
//flattening scval and sccol
int counters=0;
int* scval_flat=Make1DIntArray(varsize);
int* sccol_flat=Make1DIntArray(varsize);
int* cs=Make1DIntArray((N/c)+1);
cs[0]=0;
int countcols=0;
int z=0;
printf("\n");
printf("\n");
printf("\n");
for (i=0;i<N/c;i++)
{
countcols=0;
for(j=0;j<cols[i];j++)
{
for (k=0;k<c;k++)
{
scval_flat[counters]=varscval[i*c+k][j];
sccol_flat[counters]=varsccol[i*c+k][j];
//printf("%d ",scval_flat[counters]);
//printf("%d\n", sccol_flat[counters]);
counters=counters+1;
countcols=countcols+1;
}
}
cs[z+1]=cs[z]+countcols;
z=z+1;
}
/* printf("\ncs:");
for(i=1;i<(N/c)+1;i++)
printf("%d ", cs[i]);
/*
for(i=0;i<N/c;i++)
{
for(j=0;j<cols[i];j++)
{
result[i*c+0]+=scval_flat[cs[i]+(j*c)]*vecX[sccol_flat[cs[i]+(j*2)]];
result[i*c+1]+=scval_flat[cs[i]+(j*c)+1]*vecX[sccol_flat[cs[i]+(j*2)+1]];
}
}
printtofile1D(result,N,"resultstest.txt");
*/
hipEvent_t start, stop, start_kernel, stop_kernel;
float time_kernel;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventCreate(&start_kernel);
hipEventCreate(&stop_kernel);
hipMalloc((void**)&dev_vec, sizeof(int)*N);
hipMalloc((void**)&dev_scval, sizeof(int)*varsize);
hipMalloc((void**)&dev_result, sizeof(int)*N);
hipMalloc((void**)&dev_sccol, sizeof(int)*varsize);
hipMalloc((void**)&dev_cols, sizeof(int)*(N/c));
hipMalloc((void**)&dev_cs, sizeof(int)*(N/c));
//cudaEventRecord(start,0);
hipMemcpy(dev_vec, vecX, sizeof(int)*N, hipMemcpyHostToDevice);
hipMemcpy(dev_scval, scval_flat, sizeof(int)*varsize, hipMemcpyHostToDevice);
hipMemcpy(dev_result, result, sizeof(int)*N, hipMemcpyHostToDevice);
hipMemcpy(dev_sccol, sccol_flat, sizeof(int)*varsize, hipMemcpyHostToDevice);
hipMemcpy(dev_cols, cols, sizeof(int)*(N/c), hipMemcpyHostToDevice);
hipMemcpy(dev_cs, cs, sizeof(int)*(N/c), hipMemcpyHostToDevice);
hipEventRecord(start_kernel,0);
multiply<<<N/c,c>>>(dev_scval, dev_sccol, dev_vec, dev_result, dev_cols, dev_cs);
//sleep(10);
hipEventRecord(stop_kernel,0);
hipMemcpy(result, dev_result, sizeof(int)*N, hipMemcpyDeviceToHost);
//cudaEventRecord(stop,0);
hipEventSynchronize(stop_kernel);
//cudaEventElapsedTime(&time, start, stop);
hipEventElapsedTime(&time_kernel, start_kernel, stop_kernel);
//printf("\nTime for kernel with data transfer = %f ms \n", time);
printf("\nTime for kernel without data transfer = %f ms \n", time_kernel);
for (i=0;i<N;i++)
{
resultsordered[rows[i]]=result[i];
}
printtofile1D(resultsordered,N,"results.txt");
// CODE TO RESHUFFLE BACK
hipFree(dev_vec);
hipFree(dev_scval);
hipFree(dev_result);
hipFree(dev_sccol);
hipFree(dev_cols);
return 0;
/*
cudaMalloc((void**)&dev_vec, sizeof(int)*N);
cudaMalloc((void**)&dev_scval_flat, sizeof(int)*varsize);
cudaMalloc((void**)&dev_result, sizeof(int)*N);
cudaMalloc((void**)&dev_sccol_flat, sizeof(int)*varsize);
cudaMemcpy(dev_a, vecX, sizeof(int)*N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, varscval, sizeof(int)*varsize, cudaMemcpyHostToDevice);
cudaMemcpy(dev_c, result, sizeof(int)*N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_d, varsccol, sizeof(int)*varsize, cudaMemcpyHostToDevice);
*/
} |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /* Autores:
* Walter Martínez Santana
* José Carlos Castro
*
*Cholesky en Paralelo en CUDA
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
__global__ void multMatriz(float *da, float *db, float *dc, int num){
float sum=0;
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
while(j<num){
while(i<num){
for (unsigned int k = 0; k<num; k++)
sum += da[i * num + k] * db[k * num + j];
dc[i*num + j] = (float) sum;
i += gridDim.y * blockDim.y;
}
j+=gridDim.x * blockDim.x;
i = threadIdx.y + blockIdx.y * blockDim.y;
}
}
__global__ void indices(){
int id=threadIdx.x + blockIdx.x*blockDim.x;
printf("blockdimy: %d threadx: %d Blockidx: %d blockdimx: %d id: %d raiz: %f\n",
blockDim.y,threadIdx.x , blockIdx.x,blockDim.x, id,sqrt((double)id));
__syncthreads();
}
__global__ void choleskyParalelo(float *db, int num){
int id=threadIdx.x + blockIdx.x*blockDim.x;
int x=0;
int inicio=0;
int k=0, N=num;
int id1=id+inicio, ids=id,id2;
int N2 = N;
int NN=0, KK=0;
while(k < N){
id1=id+inicio;
//Checamos si es un elemnto de la diagonal
if(id1 == inicio){
db[id1] = sqrt(db[id1]);
}else //si no es elemento de la diagonal, lo dividimos por el elemento diagonal de su columna
{
x=0;
while(id1 <N2){
while(x<1000)
x++;
__syncthreads();
db[id1] = db[id1]/db[inicio];
id1 += gridDim.x * blockDim.x;
__syncthreads();
}
//__syncthreads();//hacemos que todos los threads esperen a los que faltan
}__syncthreads();
//id=ids;
inicio += (N-k); //Preparo el siguiente salto al siguiente elemento diagonal
NN = N2; //Empiezo actaulizar valores de las columnas restantes a la actualizada
KK = k+1;//cada columna posterior tiene 1 elemento menos a la anterior
while(NN < (int)N*(N+1)/2){
id2=id + NN; // saltamos a la siguiente columna
while(id2 < NN + (N-KK)){
db[id2] = db[id2] -db[id + KK]* db[KK];
id2 += gridDim.x * blockDim.x;
__syncthreads();
}
//__syncthreads();
NN += (N-KK);
KK++;
}
//__syncthreads();
k++; //pasamos a la siguiente columna
N2 += (N-k); //Siguiente elemento diagonal
__syncthreads();
}
}
#define n 5
#define SIZE n*n*sizeof(float)
int main(){
int N=n,i,j;
float *A, *B, *C;
float *da, *db, *dc;
int m, P=1,U=6;
srand(time(NULL));
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
dim3 dimGrid(16, 16);
dim3 dimBlock(16, 16);
A=(float *)malloc(SIZE);
B=(float *)malloc(SIZE);
C=(float *)malloc(SIZE);
for(m=0;m<N*N;m++){
A[m]=(float)P+(int)(((U-P+1.0)*rand())/(RAND_MAX+1.0));
//B[m]=(float)P+(int)(((U-P+1.0)*rand())/(RAND_MAX+1.0));
C[m]=(float)0;
}
//Transpuesta de A
for( i = 0;i<N;i++)
for(j=0;j<N;j++)
B[j + i*N] = A[i + j*N];
cudaMalloc((void**)&da, SIZE);
cudaMalloc((void**)&db, SIZE);
cudaMalloc((void**)&dc, SIZE);
cudaMemcpy(da,A, SIZE, cudaMemcpyHostToDevice);
cudaMemcpy(db,B, SIZE, cudaMemcpyHostToDevice);
cudaMemcpy(dc,C, SIZE, cudaMemcpyHostToDevice);
cudaEventRecord(start, 0);
multMatriz<<<dimGrid , dimBlock >>>(da,db,dc,N);
//cudaThreadSynchronize();
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaMemcpy(C,dc, SIZE, cudaMemcpyDeviceToHost);
cudaFree(da);
cudaFree(db);
cudaFree(dc);
free(B);
//Optimizacion de memoria
//Almacenamos la parte debajo de la diagonal y la diagonal de la matriz
int nuevoSize = N*(N+1)/2;
j=0;
int k;
B=(float *)malloc(nuevoSize*sizeof(float));
for(m=0;m<N;m++){
for(k=m;k<N;k++){
B[j++]=C[m + N*k];
}
}
//Desplegar nuevo almacenamiento en arreglo unidimensional
for(m=0;m<nuevoSize;m++)
printf("%5.0f ",B[m]);
printf("\n\n");
/*
for(m=0;m<N*N;m++){
printf("%08.0f",A[m]);
printf("%c",( (m%N)<(N-1) ) ? ' ':'\n');
}
printf("\n\n");
for(m=0;m<N*N;m++){
printf("%08.0f",B[m]);
printf("%c",( (m%N)<(N-1) ) ? ' ':'\n');
}
printf("\n\n");
*/
int NN;
NN=n;
//for(m=0;m<NN*NN;m++){
//int NN=16;
for(m=0;m<NN;m++){
for(k=0;k<NN;k++){
printf("%05.0f",C[k + m*N]);
printf("%c",( ((m*N+k)%NN)<(NN-1) ) ? ' ':'\n');
//printf("%c",( (m%N)<(N-1) ) ? ' ':'\n');
}
}
printf("\n\n");
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,start,stop);
printf("Tiempo %4.6f milseg\n\n",elapsedTime);
cudaMalloc((void**)&db, nuevoSize*sizeof(float));
cudaMemcpy(db,B, nuevoSize*sizeof(float), cudaMemcpyHostToDevice);
choleskyParalelo<<<1,512>>>(db,n);
cudaMemcpy(B,db, nuevoSize*sizeof(float), cudaMemcpyDeviceToHost);
printf("\n\n");
for(m=0;m<nuevoSize;m++)
printf("%4.4f ",B[m]);
printf("\n\n");
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree(db);
free(B);
free(C);
free(A);
return 0;
} | .file "tmpxft_000ec699_00000000-6_multMatricesCuadradasSol.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z35__device_stub__Z10multMatrizPfS_S_iPfS_S_i
.type _Z35__device_stub__Z10multMatrizPfS_S_iPfS_S_i, @function
_Z35__device_stub__Z10multMatrizPfS_S_iPfS_S_i:
.LFB2082:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z10multMatrizPfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z35__device_stub__Z10multMatrizPfS_S_iPfS_S_i, .-_Z35__device_stub__Z10multMatrizPfS_S_iPfS_S_i
.globl _Z10multMatrizPfS_S_i
.type _Z10multMatrizPfS_S_i, @function
_Z10multMatrizPfS_S_i:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z10multMatrizPfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z10multMatrizPfS_S_i, .-_Z10multMatrizPfS_S_i
.globl _Z25__device_stub__Z7indicesvv
.type _Z25__device_stub__Z7indicesvv, @function
_Z25__device_stub__Z7indicesvv:
.LFB2084:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z7indicesv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2084:
.size _Z25__device_stub__Z7indicesvv, .-_Z25__device_stub__Z7indicesvv
.globl _Z7indicesv
.type _Z7indicesv, @function
_Z7indicesv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z25__device_stub__Z7indicesvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _Z7indicesv, .-_Z7indicesv
.globl _Z37__device_stub__Z16choleskyParaleloPfiPfi
.type _Z37__device_stub__Z16choleskyParaleloPfiPfi, @function
_Z37__device_stub__Z16choleskyParaleloPfiPfi:
.LFB2086:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L23
.L19:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L24
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z16choleskyParaleloPfi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L19
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2086:
.size _Z37__device_stub__Z16choleskyParaleloPfiPfi, .-_Z37__device_stub__Z16choleskyParaleloPfiPfi
.globl _Z16choleskyParaleloPfi
.type _Z16choleskyParaleloPfi, @function
_Z16choleskyParaleloPfi:
.LFB2087:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z37__device_stub__Z16choleskyParaleloPfiPfi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2087:
.size _Z16choleskyParaleloPfi, .-_Z16choleskyParaleloPfi
.section .rodata.str1.1,"aMS",@progbits,1
.LC4:
.string "%5.0f "
.LC5:
.string "\n\n"
.LC6:
.string "%05.0f"
.LC7:
.string "%c"
.LC8:
.string "Tiempo %4.6f milseg\n\n"
.LC9:
.string "%4.4f "
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $152, %rsp
.cfi_def_cfa_offset 208
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
movl $0, %edi
call time@PLT
movl %eax, %edi
call srand@PLT
leaq 72(%rsp), %rdi
call cudaEventCreate@PLT
leaq 80(%rsp), %rdi
call cudaEventCreate@PLT
movl $16, 88(%rsp)
movl $16, 92(%rsp)
movl $1, 96(%rsp)
movl $16, 100(%rsp)
movl $16, 104(%rsp)
movl $1, 108(%rsp)
movl $100, %edi
call malloc@PLT
movq %rax, %r12
movq %rax, 16(%rsp)
movl $100, %edi
call malloc@PLT
movq %rax, %rbp
movl $100, %edi
call malloc@PLT
movq %rax, %r13
movl $0, %ebx
.L28:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2sdl %eax, %xmm0
mulsd .LC0(%rip), %xmm0
mulsd .LC1(%rip), %xmm0
cvttsd2sil %xmm0, %eax
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
addss .LC2(%rip), %xmm0
movss %xmm0, (%r12,%rbx)
movl $0x00000000, 0(%r13,%rbx)
addq $4, %rbx
cmpq $100, %rbx
jne .L28
movq %rbp, %rdi
movq 16(%rsp), %rax
leaq 100(%rax), %rcx
movl $0, %esi
.L29:
leaq -100(%rcx), %rax
movq %rdi, %rdx
.L30:
movss (%rax), %xmm0
movss %xmm0, (%rdx)
addq $20, %rax
addq $4, %rdx
cmpq %rcx, %rax
jne .L30
addl $1, %esi
addq $20, %rdi
addq $4, %rcx
cmpl $5, %esi
jne .L29
leaq 48(%rsp), %rdi
movl $100, %esi
call cudaMalloc@PLT
leaq 56(%rsp), %rdi
movl $100, %esi
call cudaMalloc@PLT
leaq 64(%rsp), %rdi
movl $100, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $100, %edx
movq 16(%rsp), %rsi
movq 48(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $100, %edx
movq %rbp, %rsi
movq 56(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $100, %edx
movq %r13, %rsi
movq 64(%rsp), %rdi
call cudaMemcpy@PLT
movl $0, %esi
movq 72(%rsp), %rdi
call cudaEventRecord@PLT
movl 108(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 100(%rsp), %rdx
movq 88(%rsp), %rdi
movl 96(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L52
.L32:
movl $0, %esi
movq 80(%rsp), %rdi
call cudaEventRecord@PLT
movq 80(%rsp), %rdi
call cudaEventSynchronize@PLT
movl $2, %ecx
movl $100, %edx
movq 64(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
movq 48(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rdi
call cudaFree@PLT
movq 64(%rsp), %rdi
call cudaFree@PLT
movq %rbp, %rdi
call free@PLT
movl $60, %edi
call malloc@PLT
movq %rax, %r9
movq %rax, 8(%rsp)
movq %r13, %r12
movq %r13, %r8
movl $0, %edi
movl $0, %esi
movl $4, %r11d
leaq 4(%rax), %r10
.L33:
movl %edi, %ebx
movslq %esi, %rcx
leaq (%r9,%rcx,4), %rax
movq %r9, %rbp
movq %r11, %rdx
subq %rdi, %rdx
addq %rcx, %rdx
leaq (%r10,%rdx,4), %rcx
movq %r8, %rdx
.L34:
movss (%rdx), %xmm0
movss %xmm0, (%rax)
addq $20, %rdx
addq $4, %rax
cmpq %rcx, %rax
jne .L34
addl $5, %esi
subl %ebx, %esi
addq $1, %rdi
addq $24, %r8
cmpq $5, %rdi
jne .L33
movq 8(%rsp), %rbx
leaq 60(%rbx), %r14
leaq .LC4(%rip), %r15
.L36:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %r15, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %r14, %rbx
jne .L36
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 100(%r13), %rax
movl $0, %r15d
movq %rbp, 24(%rsp)
movl %r15d, %ebp
movq %r12, %r15
movq %rax, %r12
.L37:
movl $0, %ebx
.L39:
pxor %xmm0, %xmm0
cvtss2sd (%r15,%rbx,4), %xmm0
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
leal 0(%rbp,%rbx), %edx
movslq %edx, %rax
imulq $1717986919, %rax, %rax
sarq $33, %rax
movl %edx, %ecx
sarl $31, %ecx
subl %ecx, %eax
leal (%rax,%rax,4), %eax
subl %eax, %edx
cmpl $3, %edx
movl $32, %edx
movl $10, %eax
cmovg %eax, %edx
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $1, %rbx
cmpq $5, %rbx
jne .L39
addl $5, %ebp
addq $20, %r15
cmpq %r12, %r15
jne .L37
movq 24(%rsp), %rbp
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 44(%rsp), %rdi
movq 80(%rsp), %rdx
movq 72(%rsp), %rsi
call cudaEventElapsedTime@PLT
pxor %xmm0, %xmm0
cvtss2sd 44(%rsp), %xmm0
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
leaq 56(%rsp), %rdi
movl $60, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $60, %edx
movq 8(%rsp), %rsi
movq 56(%rsp), %rdi
call cudaMemcpy@PLT
movl $512, 124(%rsp)
movl $1, 128(%rsp)
movl $1, 112(%rsp)
movl $1, 116(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 124(%rsp), %rdx
movl $1, %ecx
movq 112(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L53
.L41:
movl $2, %ecx
movl $60, %edx
movq 56(%rsp), %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC9(%rip), %rbx
.L42:
pxor %xmm0, %xmm0
cvtss2sd 0(%rbp), %xmm0
movq %rbx, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbp
cmpq %r14, %rbp
jne .L42
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 72(%rsp), %rdi
call cudaEventDestroy@PLT
movq 80(%rsp), %rdi
call cudaEventDestroy@PLT
movq 56(%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
movq 16(%rsp), %rdi
call free@PLT
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L54
movl $0, %eax
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L52:
.cfi_restore_state
movl $5, %ecx
movq 64(%rsp), %rdx
movq 56(%rsp), %rsi
movq 48(%rsp), %rdi
call _Z35__device_stub__Z10multMatrizPfS_S_iPfS_S_i
jmp .L32
.L53:
movl $5, %esi
movq 56(%rsp), %rdi
call _Z37__device_stub__Z16choleskyParaleloPfiPfi
jmp .L41
.L54:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC10:
.string "_Z16choleskyParaleloPfi"
.LC11:
.string "_Z7indicesv"
.LC12:
.string "_Z10multMatrizPfS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2089:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _Z16choleskyParaleloPfi(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC11(%rip), %rdx
movq %rdx, %rcx
leaq _Z7indicesv(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC12(%rip), %rdx
movq %rdx, %rcx
leaq _Z10multMatrizPfS_S_i(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2089:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC0:
.long 0
.long 1075314688
.align 8
.LC1:
.long 0
.long 1040187392
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC2:
.long 1065353216
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /* Autores:
* Walter Martínez Santana
* José Carlos Castro
*
*Cholesky en Paralelo en CUDA
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
__global__ void multMatriz(float *da, float *db, float *dc, int num){
float sum=0;
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
while(j<num){
while(i<num){
for (unsigned int k = 0; k<num; k++)
sum += da[i * num + k] * db[k * num + j];
dc[i*num + j] = (float) sum;
i += gridDim.y * blockDim.y;
}
j+=gridDim.x * blockDim.x;
i = threadIdx.y + blockIdx.y * blockDim.y;
}
}
__global__ void indices(){
int id=threadIdx.x + blockIdx.x*blockDim.x;
printf("blockdimy: %d threadx: %d Blockidx: %d blockdimx: %d id: %d raiz: %f\n",
blockDim.y,threadIdx.x , blockIdx.x,blockDim.x, id,sqrt((double)id));
__syncthreads();
}
__global__ void choleskyParalelo(float *db, int num){
int id=threadIdx.x + blockIdx.x*blockDim.x;
int x=0;
int inicio=0;
int k=0, N=num;
int id1=id+inicio, ids=id,id2;
int N2 = N;
int NN=0, KK=0;
while(k < N){
id1=id+inicio;
//Checamos si es un elemnto de la diagonal
if(id1 == inicio){
db[id1] = sqrt(db[id1]);
}else //si no es elemento de la diagonal, lo dividimos por el elemento diagonal de su columna
{
x=0;
while(id1 <N2){
while(x<1000)
x++;
__syncthreads();
db[id1] = db[id1]/db[inicio];
id1 += gridDim.x * blockDim.x;
__syncthreads();
}
//__syncthreads();//hacemos que todos los threads esperen a los que faltan
}__syncthreads();
//id=ids;
inicio += (N-k); //Preparo el siguiente salto al siguiente elemento diagonal
NN = N2; //Empiezo actaulizar valores de las columnas restantes a la actualizada
KK = k+1;//cada columna posterior tiene 1 elemento menos a la anterior
while(NN < (int)N*(N+1)/2){
id2=id + NN; // saltamos a la siguiente columna
while(id2 < NN + (N-KK)){
db[id2] = db[id2] -db[id + KK]* db[KK];
id2 += gridDim.x * blockDim.x;
__syncthreads();
}
//__syncthreads();
NN += (N-KK);
KK++;
}
//__syncthreads();
k++; //pasamos a la siguiente columna
N2 += (N-k); //Siguiente elemento diagonal
__syncthreads();
}
}
#define n 5
#define SIZE n*n*sizeof(float)
int main(){
int N=n,i,j;
float *A, *B, *C;
float *da, *db, *dc;
int m, P=1,U=6;
srand(time(NULL));
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
dim3 dimGrid(16, 16);
dim3 dimBlock(16, 16);
A=(float *)malloc(SIZE);
B=(float *)malloc(SIZE);
C=(float *)malloc(SIZE);
for(m=0;m<N*N;m++){
A[m]=(float)P+(int)(((U-P+1.0)*rand())/(RAND_MAX+1.0));
//B[m]=(float)P+(int)(((U-P+1.0)*rand())/(RAND_MAX+1.0));
C[m]=(float)0;
}
//Transpuesta de A
for( i = 0;i<N;i++)
for(j=0;j<N;j++)
B[j + i*N] = A[i + j*N];
cudaMalloc((void**)&da, SIZE);
cudaMalloc((void**)&db, SIZE);
cudaMalloc((void**)&dc, SIZE);
cudaMemcpy(da,A, SIZE, cudaMemcpyHostToDevice);
cudaMemcpy(db,B, SIZE, cudaMemcpyHostToDevice);
cudaMemcpy(dc,C, SIZE, cudaMemcpyHostToDevice);
cudaEventRecord(start, 0);
multMatriz<<<dimGrid , dimBlock >>>(da,db,dc,N);
//cudaThreadSynchronize();
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaMemcpy(C,dc, SIZE, cudaMemcpyDeviceToHost);
cudaFree(da);
cudaFree(db);
cudaFree(dc);
free(B);
//Optimizacion de memoria
//Almacenamos la parte debajo de la diagonal y la diagonal de la matriz
int nuevoSize = N*(N+1)/2;
j=0;
int k;
B=(float *)malloc(nuevoSize*sizeof(float));
for(m=0;m<N;m++){
for(k=m;k<N;k++){
B[j++]=C[m + N*k];
}
}
//Desplegar nuevo almacenamiento en arreglo unidimensional
for(m=0;m<nuevoSize;m++)
printf("%5.0f ",B[m]);
printf("\n\n");
/*
for(m=0;m<N*N;m++){
printf("%08.0f",A[m]);
printf("%c",( (m%N)<(N-1) ) ? ' ':'\n');
}
printf("\n\n");
for(m=0;m<N*N;m++){
printf("%08.0f",B[m]);
printf("%c",( (m%N)<(N-1) ) ? ' ':'\n');
}
printf("\n\n");
*/
int NN;
NN=n;
//for(m=0;m<NN*NN;m++){
//int NN=16;
for(m=0;m<NN;m++){
for(k=0;k<NN;k++){
printf("%05.0f",C[k + m*N]);
printf("%c",( ((m*N+k)%NN)<(NN-1) ) ? ' ':'\n');
//printf("%c",( (m%N)<(N-1) ) ? ' ':'\n');
}
}
printf("\n\n");
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,start,stop);
printf("Tiempo %4.6f milseg\n\n",elapsedTime);
cudaMalloc((void**)&db, nuevoSize*sizeof(float));
cudaMemcpy(db,B, nuevoSize*sizeof(float), cudaMemcpyHostToDevice);
choleskyParalelo<<<1,512>>>(db,n);
cudaMemcpy(B,db, nuevoSize*sizeof(float), cudaMemcpyDeviceToHost);
printf("\n\n");
for(m=0;m<nuevoSize;m++)
printf("%4.4f ",B[m]);
printf("\n\n");
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree(db);
free(B);
free(C);
free(A);
return 0;
} | /* Autores:
* Walter Martínez Santana
* José Carlos Castro
*
*Cholesky en Paralelo en CUDA
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
__global__ void multMatriz(float *da, float *db, float *dc, int num){
float sum=0;
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
while(j<num){
while(i<num){
for (unsigned int k = 0; k<num; k++)
sum += da[i * num + k] * db[k * num + j];
dc[i*num + j] = (float) sum;
i += gridDim.y * blockDim.y;
}
j+=gridDim.x * blockDim.x;
i = threadIdx.y + blockIdx.y * blockDim.y;
}
}
__global__ void indices(){
int id=threadIdx.x + blockIdx.x*blockDim.x;
printf("blockdimy: %d threadx: %d Blockidx: %d blockdimx: %d id: %d raiz: %f\n",
blockDim.y,threadIdx.x , blockIdx.x,blockDim.x, id,sqrt((double)id));
__syncthreads();
}
__global__ void choleskyParalelo(float *db, int num){
int id=threadIdx.x + blockIdx.x*blockDim.x;
int x=0;
int inicio=0;
int k=0, N=num;
int id1=id+inicio, ids=id,id2;
int N2 = N;
int NN=0, KK=0;
while(k < N){
id1=id+inicio;
//Checamos si es un elemnto de la diagonal
if(id1 == inicio){
db[id1] = sqrt(db[id1]);
}else //si no es elemento de la diagonal, lo dividimos por el elemento diagonal de su columna
{
x=0;
while(id1 <N2){
while(x<1000)
x++;
__syncthreads();
db[id1] = db[id1]/db[inicio];
id1 += gridDim.x * blockDim.x;
__syncthreads();
}
//__syncthreads();//hacemos que todos los threads esperen a los que faltan
}__syncthreads();
//id=ids;
inicio += (N-k); //Preparo el siguiente salto al siguiente elemento diagonal
NN = N2; //Empiezo actaulizar valores de las columnas restantes a la actualizada
KK = k+1;//cada columna posterior tiene 1 elemento menos a la anterior
while(NN < (int)N*(N+1)/2){
id2=id + NN; // saltamos a la siguiente columna
while(id2 < NN + (N-KK)){
db[id2] = db[id2] -db[id + KK]* db[KK];
id2 += gridDim.x * blockDim.x;
__syncthreads();
}
//__syncthreads();
NN += (N-KK);
KK++;
}
//__syncthreads();
k++; //pasamos a la siguiente columna
N2 += (N-k); //Siguiente elemento diagonal
__syncthreads();
}
}
#define n 5
#define SIZE n*n*sizeof(float)
int main(){
int N=n,i,j;
float *A, *B, *C;
float *da, *db, *dc;
int m, P=1,U=6;
srand(time(NULL));
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
dim3 dimGrid(16, 16);
dim3 dimBlock(16, 16);
A=(float *)malloc(SIZE);
B=(float *)malloc(SIZE);
C=(float *)malloc(SIZE);
for(m=0;m<N*N;m++){
A[m]=(float)P+(int)(((U-P+1.0)*rand())/(RAND_MAX+1.0));
//B[m]=(float)P+(int)(((U-P+1.0)*rand())/(RAND_MAX+1.0));
C[m]=(float)0;
}
//Transpuesta de A
for( i = 0;i<N;i++)
for(j=0;j<N;j++)
B[j + i*N] = A[i + j*N];
hipMalloc((void**)&da, SIZE);
hipMalloc((void**)&db, SIZE);
hipMalloc((void**)&dc, SIZE);
hipMemcpy(da,A, SIZE, hipMemcpyHostToDevice);
hipMemcpy(db,B, SIZE, hipMemcpyHostToDevice);
hipMemcpy(dc,C, SIZE, hipMemcpyHostToDevice);
hipEventRecord(start, 0);
multMatriz<<<dimGrid , dimBlock >>>(da,db,dc,N);
//cudaThreadSynchronize();
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipMemcpy(C,dc, SIZE, hipMemcpyDeviceToHost);
hipFree(da);
hipFree(db);
hipFree(dc);
free(B);
//Optimizacion de memoria
//Almacenamos la parte debajo de la diagonal y la diagonal de la matriz
int nuevoSize = N*(N+1)/2;
j=0;
int k;
B=(float *)malloc(nuevoSize*sizeof(float));
for(m=0;m<N;m++){
for(k=m;k<N;k++){
B[j++]=C[m + N*k];
}
}
//Desplegar nuevo almacenamiento en arreglo unidimensional
for(m=0;m<nuevoSize;m++)
printf("%5.0f ",B[m]);
printf("\n\n");
/*
for(m=0;m<N*N;m++){
printf("%08.0f",A[m]);
printf("%c",( (m%N)<(N-1) ) ? ' ':'\n');
}
printf("\n\n");
for(m=0;m<N*N;m++){
printf("%08.0f",B[m]);
printf("%c",( (m%N)<(N-1) ) ? ' ':'\n');
}
printf("\n\n");
*/
int NN;
NN=n;
//for(m=0;m<NN*NN;m++){
//int NN=16;
for(m=0;m<NN;m++){
for(k=0;k<NN;k++){
printf("%05.0f",C[k + m*N]);
printf("%c",( ((m*N+k)%NN)<(NN-1) ) ? ' ':'\n');
//printf("%c",( (m%N)<(N-1) ) ? ' ':'\n');
}
}
printf("\n\n");
float elapsedTime;
hipEventElapsedTime(&elapsedTime,start,stop);
printf("Tiempo %4.6f milseg\n\n",elapsedTime);
hipMalloc((void**)&db, nuevoSize*sizeof(float));
hipMemcpy(db,B, nuevoSize*sizeof(float), hipMemcpyHostToDevice);
choleskyParalelo<<<1,512>>>(db,n);
hipMemcpy(B,db, nuevoSize*sizeof(float), hipMemcpyDeviceToHost);
printf("\n\n");
for(m=0;m<nuevoSize;m++)
printf("%4.4f ",B[m]);
printf("\n\n");
hipEventDestroy(start);
hipEventDestroy(stop);
hipFree(db);
free(B);
free(C);
free(A);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /* Autores:
* Walter Martínez Santana
* José Carlos Castro
*
*Cholesky en Paralelo en CUDA
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
__global__ void multMatriz(float *da, float *db, float *dc, int num){
float sum=0;
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
while(j<num){
while(i<num){
for (unsigned int k = 0; k<num; k++)
sum += da[i * num + k] * db[k * num + j];
dc[i*num + j] = (float) sum;
i += gridDim.y * blockDim.y;
}
j+=gridDim.x * blockDim.x;
i = threadIdx.y + blockIdx.y * blockDim.y;
}
}
__global__ void indices(){
int id=threadIdx.x + blockIdx.x*blockDim.x;
printf("blockdimy: %d threadx: %d Blockidx: %d blockdimx: %d id: %d raiz: %f\n",
blockDim.y,threadIdx.x , blockIdx.x,blockDim.x, id,sqrt((double)id));
__syncthreads();
}
__global__ void choleskyParalelo(float *db, int num){
int id=threadIdx.x + blockIdx.x*blockDim.x;
int x=0;
int inicio=0;
int k=0, N=num;
int id1=id+inicio, ids=id,id2;
int N2 = N;
int NN=0, KK=0;
while(k < N){
id1=id+inicio;
//Checamos si es un elemnto de la diagonal
if(id1 == inicio){
db[id1] = sqrt(db[id1]);
}else //si no es elemento de la diagonal, lo dividimos por el elemento diagonal de su columna
{
x=0;
while(id1 <N2){
while(x<1000)
x++;
__syncthreads();
db[id1] = db[id1]/db[inicio];
id1 += gridDim.x * blockDim.x;
__syncthreads();
}
//__syncthreads();//hacemos que todos los threads esperen a los que faltan
}__syncthreads();
//id=ids;
inicio += (N-k); //Preparo el siguiente salto al siguiente elemento diagonal
NN = N2; //Empiezo actaulizar valores de las columnas restantes a la actualizada
KK = k+1;//cada columna posterior tiene 1 elemento menos a la anterior
while(NN < (int)N*(N+1)/2){
id2=id + NN; // saltamos a la siguiente columna
while(id2 < NN + (N-KK)){
db[id2] = db[id2] -db[id + KK]* db[KK];
id2 += gridDim.x * blockDim.x;
__syncthreads();
}
//__syncthreads();
NN += (N-KK);
KK++;
}
//__syncthreads();
k++; //pasamos a la siguiente columna
N2 += (N-k); //Siguiente elemento diagonal
__syncthreads();
}
}
#define n 5
#define SIZE n*n*sizeof(float)
int main(){
int N=n,i,j;
float *A, *B, *C;
float *da, *db, *dc;
int m, P=1,U=6;
srand(time(NULL));
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
dim3 dimGrid(16, 16);
dim3 dimBlock(16, 16);
A=(float *)malloc(SIZE);
B=(float *)malloc(SIZE);
C=(float *)malloc(SIZE);
for(m=0;m<N*N;m++){
A[m]=(float)P+(int)(((U-P+1.0)*rand())/(RAND_MAX+1.0));
//B[m]=(float)P+(int)(((U-P+1.0)*rand())/(RAND_MAX+1.0));
C[m]=(float)0;
}
//Transpuesta de A
for( i = 0;i<N;i++)
for(j=0;j<N;j++)
B[j + i*N] = A[i + j*N];
hipMalloc((void**)&da, SIZE);
hipMalloc((void**)&db, SIZE);
hipMalloc((void**)&dc, SIZE);
hipMemcpy(da,A, SIZE, hipMemcpyHostToDevice);
hipMemcpy(db,B, SIZE, hipMemcpyHostToDevice);
hipMemcpy(dc,C, SIZE, hipMemcpyHostToDevice);
hipEventRecord(start, 0);
multMatriz<<<dimGrid , dimBlock >>>(da,db,dc,N);
//cudaThreadSynchronize();
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipMemcpy(C,dc, SIZE, hipMemcpyDeviceToHost);
hipFree(da);
hipFree(db);
hipFree(dc);
free(B);
//Optimizacion de memoria
//Almacenamos la parte debajo de la diagonal y la diagonal de la matriz
int nuevoSize = N*(N+1)/2;
j=0;
int k;
B=(float *)malloc(nuevoSize*sizeof(float));
for(m=0;m<N;m++){
for(k=m;k<N;k++){
B[j++]=C[m + N*k];
}
}
//Desplegar nuevo almacenamiento en arreglo unidimensional
for(m=0;m<nuevoSize;m++)
printf("%5.0f ",B[m]);
printf("\n\n");
/*
for(m=0;m<N*N;m++){
printf("%08.0f",A[m]);
printf("%c",( (m%N)<(N-1) ) ? ' ':'\n');
}
printf("\n\n");
for(m=0;m<N*N;m++){
printf("%08.0f",B[m]);
printf("%c",( (m%N)<(N-1) ) ? ' ':'\n');
}
printf("\n\n");
*/
int NN;
NN=n;
//for(m=0;m<NN*NN;m++){
//int NN=16;
for(m=0;m<NN;m++){
for(k=0;k<NN;k++){
printf("%05.0f",C[k + m*N]);
printf("%c",( ((m*N+k)%NN)<(NN-1) ) ? ' ':'\n');
//printf("%c",( (m%N)<(N-1) ) ? ' ':'\n');
}
}
printf("\n\n");
float elapsedTime;
hipEventElapsedTime(&elapsedTime,start,stop);
printf("Tiempo %4.6f milseg\n\n",elapsedTime);
hipMalloc((void**)&db, nuevoSize*sizeof(float));
hipMemcpy(db,B, nuevoSize*sizeof(float), hipMemcpyHostToDevice);
choleskyParalelo<<<1,512>>>(db,n);
hipMemcpy(B,db, nuevoSize*sizeof(float), hipMemcpyDeviceToHost);
printf("\n\n");
for(m=0;m<nuevoSize;m++)
printf("%4.4f ",B[m]);
printf("\n\n");
hipEventDestroy(start);
hipEventDestroy(stop);
hipFree(db);
free(B);
free(C);
free(A);
return 0;
} | .text
.file "multMatricesCuadradasSol.hip"
.globl _Z25__device_stub__multMatrizPfS_S_i # -- Begin function _Z25__device_stub__multMatrizPfS_S_i
.p2align 4, 0x90
.type _Z25__device_stub__multMatrizPfS_S_i,@function
_Z25__device_stub__multMatrizPfS_S_i: # @_Z25__device_stub__multMatrizPfS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10multMatrizPfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z25__device_stub__multMatrizPfS_S_i, .Lfunc_end0-_Z25__device_stub__multMatrizPfS_S_i
.cfi_endproc
# -- End function
.globl _Z22__device_stub__indicesv # -- Begin function _Z22__device_stub__indicesv
.p2align 4, 0x90
.type _Z22__device_stub__indicesv,@function
_Z22__device_stub__indicesv: # @_Z22__device_stub__indicesv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z7indicesv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end1:
.size _Z22__device_stub__indicesv, .Lfunc_end1-_Z22__device_stub__indicesv
.cfi_endproc
# -- End function
.globl _Z31__device_stub__choleskyParaleloPfi # -- Begin function _Z31__device_stub__choleskyParaleloPfi
.p2align 4, 0x90
.type _Z31__device_stub__choleskyParaleloPfi,@function
_Z31__device_stub__choleskyParaleloPfi: # @_Z31__device_stub__choleskyParaleloPfi
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z16choleskyParaleloPfi, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end2:
.size _Z31__device_stub__choleskyParaleloPfi, .Lfunc_end2-_Z31__device_stub__choleskyParaleloPfi
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI3_0:
.quad 0x4018000000000000 # double 6
.LCPI3_1:
.quad 0x3e00000000000000 # double 4.6566128730773926E-10
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0
.LCPI3_2:
.long 0x3f800000 # float 1
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $200, %rsp
.cfi_def_cfa_offset 256
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
xorl %r12d, %r12d
xorl %edi, %edi
callq time
movl %eax, %edi
callq srand
leaq 32(%rsp), %rdi
callq hipEventCreate
leaq 16(%rsp), %rdi
callq hipEventCreate
movl $100, %edi
callq malloc
movq %rax, %rbx
movl $100, %edi
callq malloc
movq %rax, %r15
movl $100, %edi
callq malloc
movq %rax, %r14
.p2align 4, 0x90
.LBB3_1: # =>This Inner Loop Header: Depth=1
callq rand
xorps %xmm0, %xmm0
cvtsi2sd %eax, %xmm0
mulsd .LCPI3_0(%rip), %xmm0
mulsd .LCPI3_1(%rip), %xmm0
cvttpd2dq %xmm0, %xmm0
cvtdq2ps %xmm0, %xmm0
addss .LCPI3_2(%rip), %xmm0
movss %xmm0, (%rbx,%r12,4)
movl $0, (%r14,%r12,4)
incq %r12
cmpq $25, %r12
jne .LBB3_1
# %bb.2: # %.preheader113.preheader
xorl %eax, %eax
movq %rbx, %rcx
movq %r15, %rdx
.p2align 4, 0x90
.LBB3_3: # %.preheader113
# =>This Loop Header: Depth=1
# Child Loop BB3_4 Depth 2
movq %rcx, %rsi
xorl %edi, %edi
.p2align 4, 0x90
.LBB3_4: # Parent Loop BB3_3 Depth=1
# => This Inner Loop Header: Depth=2
movss (%rsi), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss %xmm0, (%rdx,%rdi,4)
incq %rdi
addq $20, %rsi
cmpq $5, %rdi
jne .LBB3_4
# %bb.5: # in Loop: Header=BB3_3 Depth=1
incq %rax
addq $20, %rdx
addq $4, %rcx
cmpq $5, %rax
jne .LBB3_3
# %bb.6:
leaq 40(%rsp), %rdi
movl $100, %r12d
movl $100, %esi
callq hipMalloc
leaq 8(%rsp), %rdi
movl $100, %esi
callq hipMalloc
leaq 24(%rsp), %rdi
movl $100, %esi
callq hipMalloc
movq 40(%rsp), %rdi
movl $100, %edx
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
movl $100, %edx
movq %r15, %rsi
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
movl $100, %edx
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movq 32(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movabsq $68719476752, %rdi # imm = 0x1000000010
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_8
# %bb.7:
movq 40(%rsp), %rax
movq 8(%rsp), %rcx
movq 24(%rsp), %rdx
movq %rax, 112(%rsp)
movq %rcx, 72(%rsp)
movq %rdx, 64(%rsp)
movl $5, 124(%rsp)
leaq 112(%rsp), %rax
movq %rax, 128(%rsp)
leaq 72(%rsp), %rax
movq %rax, 136(%rsp)
leaq 64(%rsp), %rax
movq %rax, 144(%rsp)
leaq 124(%rsp), %rax
movq %rax, 152(%rsp)
leaq 96(%rsp), %rdi
leaq 80(%rsp), %rsi
leaq 56(%rsp), %rdx
leaq 48(%rsp), %rcx
callq __hipPopCallConfiguration
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
movq 80(%rsp), %rcx
movl 88(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z10multMatrizPfS_S_i, %edi
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_8:
movq 16(%rsp), %rdi
xorl %r13d, %r13d
xorl %esi, %esi
callq hipEventRecord
movq 16(%rsp), %rdi
callq hipEventSynchronize
movq 24(%rsp), %rsi
movl $100, %edx
movq %r14, %rdi
movl $2, %ecx
callq hipMemcpy
movq 40(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq %r15, %rdi
callq free
movl $60, %edi
callq malloc
movq %rax, %r15
movq %r14, %rax
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB3_9: # %.preheader112
# =>This Loop Header: Depth=1
# Child Loop BB3_10 Depth 2
movslq %ecx, %rcx
xorl %edx, %edx
.p2align 4, 0x90
.LBB3_10: # Parent Loop BB3_9 Depth=1
# => This Inner Loop Header: Depth=2
movss (%rax,%rdx), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss %xmm0, (%r15,%rcx,4)
incq %rcx
addq $20, %rdx
cmpq %rdx, %r12
jne .LBB3_10
# %bb.11: # in Loop: Header=BB3_9 Depth=1
incq %r13
addq $24, %rax
addq $-20, %r12
cmpq $5, %r13
jne .LBB3_9
# %bb.12: # %.preheader111.preheader
movq %rbx, 176(%rsp) # 8-byte Spill
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB3_13: # %.preheader111
# =>This Inner Loop Header: Depth=1
movss (%r15,%rbx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %edi
movb $1, %al
callq printf
incq %rbx
cmpq $15, %rbx
jne .LBB3_13
# %bb.14:
movq %r15, 160(%rsp) # 8-byte Spill
movl $.Lstr.3, %edi
callq puts@PLT
xorl %eax, %eax
movl $3435973837, %r13d # imm = 0xCCCCCCCD
movl $32, %ebp
movq %r14, 168(%rsp) # 8-byte Spill
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB3_15: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB3_16 Depth 2
movq %rcx, 184(%rsp) # 8-byte Spill
movq %rax, 192(%rsp) # 8-byte Spill
movl %eax, %r12d
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB3_16: # Parent Loop BB3_15 Depth=1
# => This Inner Loop Header: Depth=2
movl %r12d, %eax
imulq %r13, %rax
shrq $34, %rax
leal (%rax,%rax,4), %eax
movl %r12d, %r15d
subl %eax, %r15d
movss (%r14,%rbx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.2, %edi
movb $1, %al
callq printf
cmpl $4, %r15d
movl $10, %edi
cmovbl %ebp, %edi
callq putchar@PLT
incq %rbx
incl %r12d
cmpq $5, %rbx
jne .LBB3_16
# %bb.17: # in Loop: Header=BB3_15 Depth=1
movq 184(%rsp), %rcx # 8-byte Reload
incq %rcx
movq 192(%rsp), %rax # 8-byte Reload
addl $5, %eax
addq $20, %r14
cmpq $5, %rcx
jne .LBB3_15
# %bb.18:
movl $.Lstr.3, %edi
callq puts@PLT
movq 32(%rsp), %rsi
movq 16(%rsp), %rdx
leaq 48(%rsp), %rdi
callq hipEventElapsedTime
movss 48(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.4, %edi
movb $1, %al
callq printf
leaq 8(%rsp), %rdi
movl $60, %esi
callq hipMalloc
movq 8(%rsp), %rdi
movl $60, %edx
movq 160(%rsp), %r15 # 8-byte Reload
movq %r15, %rsi
movl $1, %ecx
callq hipMemcpy
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 511(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_20
# %bb.19:
movq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $5, 56(%rsp)
leaq 112(%rsp), %rax
movq %rax, 128(%rsp)
leaq 56(%rsp), %rax
movq %rax, 136(%rsp)
leaq 96(%rsp), %rdi
leaq 80(%rsp), %rsi
leaq 72(%rsp), %rdx
leaq 64(%rsp), %rcx
callq __hipPopCallConfiguration
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
movq 80(%rsp), %rcx
movl 88(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z16choleskyParaleloPfi, %edi
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
pushq 80(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_20:
movq 8(%rsp), %rsi
movl $60, %edx
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
movl $.Lstr.3, %edi
callq puts@PLT
xorl %ebx, %ebx
movq 168(%rsp), %r14 # 8-byte Reload
.p2align 4, 0x90
.LBB3_21: # =>This Inner Loop Header: Depth=1
movss (%r15,%rbx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.5, %edi
movb $1, %al
callq printf
incq %rbx
cmpq $15, %rbx
jne .LBB3_21
# %bb.22:
movl $.Lstr.3, %edi
callq puts@PLT
movq 32(%rsp), %rdi
callq hipEventDestroy
movq 16(%rsp), %rdi
callq hipEventDestroy
movq 8(%rsp), %rdi
callq hipFree
movq %r15, %rdi
callq free
movq %r14, %rdi
callq free
movq 176(%rsp), %rdi # 8-byte Reload
callq free
xorl %eax, %eax
addq $200, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10multMatrizPfS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z7indicesv, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16choleskyParaleloPfi, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z10multMatrizPfS_S_i,@object # @_Z10multMatrizPfS_S_i
.section .rodata,"a",@progbits
.globl _Z10multMatrizPfS_S_i
.p2align 3, 0x0
_Z10multMatrizPfS_S_i:
.quad _Z25__device_stub__multMatrizPfS_S_i
.size _Z10multMatrizPfS_S_i, 8
.type _Z7indicesv,@object # @_Z7indicesv
.globl _Z7indicesv
.p2align 3, 0x0
_Z7indicesv:
.quad _Z22__device_stub__indicesv
.size _Z7indicesv, 8
.type _Z16choleskyParaleloPfi,@object # @_Z16choleskyParaleloPfi
.globl _Z16choleskyParaleloPfi
.p2align 3, 0x0
_Z16choleskyParaleloPfi:
.quad _Z31__device_stub__choleskyParaleloPfi
.size _Z16choleskyParaleloPfi, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%5.0f "
.size .L.str, 7
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "%05.0f"
.size .L.str.2, 7
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Tiempo %4.6f milseg\n\n"
.size .L.str.4, 22
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "%4.4f "
.size .L.str.5, 7
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z10multMatrizPfS_S_i"
.size .L__unnamed_1, 22
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z7indicesv"
.size .L__unnamed_2, 12
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z16choleskyParaleloPfi"
.size .L__unnamed_3, 24
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr.3,@object # @str.3
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr.3:
.asciz "\n"
.size .Lstr.3, 2
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__multMatrizPfS_S_i
.addrsig_sym _Z22__device_stub__indicesv
.addrsig_sym _Z31__device_stub__choleskyParaleloPfi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z10multMatrizPfS_S_i
.addrsig_sym _Z7indicesv
.addrsig_sym _Z16choleskyParaleloPfi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000ec699_00000000-6_multMatricesCuadradasSol.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z35__device_stub__Z10multMatrizPfS_S_iPfS_S_i
.type _Z35__device_stub__Z10multMatrizPfS_S_iPfS_S_i, @function
_Z35__device_stub__Z10multMatrizPfS_S_iPfS_S_i:
.LFB2082:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z10multMatrizPfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z35__device_stub__Z10multMatrizPfS_S_iPfS_S_i, .-_Z35__device_stub__Z10multMatrizPfS_S_iPfS_S_i
.globl _Z10multMatrizPfS_S_i
.type _Z10multMatrizPfS_S_i, @function
_Z10multMatrizPfS_S_i:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z10multMatrizPfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z10multMatrizPfS_S_i, .-_Z10multMatrizPfS_S_i
.globl _Z25__device_stub__Z7indicesvv
.type _Z25__device_stub__Z7indicesvv, @function
_Z25__device_stub__Z7indicesvv:
.LFB2084:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z7indicesv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2084:
.size _Z25__device_stub__Z7indicesvv, .-_Z25__device_stub__Z7indicesvv
.globl _Z7indicesv
.type _Z7indicesv, @function
_Z7indicesv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z25__device_stub__Z7indicesvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _Z7indicesv, .-_Z7indicesv
.globl _Z37__device_stub__Z16choleskyParaleloPfiPfi
.type _Z37__device_stub__Z16choleskyParaleloPfiPfi, @function
_Z37__device_stub__Z16choleskyParaleloPfiPfi:
.LFB2086:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L23
.L19:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L24
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z16choleskyParaleloPfi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L19
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2086:
.size _Z37__device_stub__Z16choleskyParaleloPfiPfi, .-_Z37__device_stub__Z16choleskyParaleloPfiPfi
.globl _Z16choleskyParaleloPfi
.type _Z16choleskyParaleloPfi, @function
_Z16choleskyParaleloPfi:
.LFB2087:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z37__device_stub__Z16choleskyParaleloPfiPfi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2087:
.size _Z16choleskyParaleloPfi, .-_Z16choleskyParaleloPfi
.section .rodata.str1.1,"aMS",@progbits,1
.LC4:
.string "%5.0f "
.LC5:
.string "\n\n"
.LC6:
.string "%05.0f"
.LC7:
.string "%c"
.LC8:
.string "Tiempo %4.6f milseg\n\n"
.LC9:
.string "%4.4f "
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $152, %rsp
.cfi_def_cfa_offset 208
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
movl $0, %edi
call time@PLT
movl %eax, %edi
call srand@PLT
leaq 72(%rsp), %rdi
call cudaEventCreate@PLT
leaq 80(%rsp), %rdi
call cudaEventCreate@PLT
movl $16, 88(%rsp)
movl $16, 92(%rsp)
movl $1, 96(%rsp)
movl $16, 100(%rsp)
movl $16, 104(%rsp)
movl $1, 108(%rsp)
movl $100, %edi
call malloc@PLT
movq %rax, %r12
movq %rax, 16(%rsp)
movl $100, %edi
call malloc@PLT
movq %rax, %rbp
movl $100, %edi
call malloc@PLT
movq %rax, %r13
movl $0, %ebx
.L28:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2sdl %eax, %xmm0
mulsd .LC0(%rip), %xmm0
mulsd .LC1(%rip), %xmm0
cvttsd2sil %xmm0, %eax
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
addss .LC2(%rip), %xmm0
movss %xmm0, (%r12,%rbx)
movl $0x00000000, 0(%r13,%rbx)
addq $4, %rbx
cmpq $100, %rbx
jne .L28
movq %rbp, %rdi
movq 16(%rsp), %rax
leaq 100(%rax), %rcx
movl $0, %esi
.L29:
leaq -100(%rcx), %rax
movq %rdi, %rdx
.L30:
movss (%rax), %xmm0
movss %xmm0, (%rdx)
addq $20, %rax
addq $4, %rdx
cmpq %rcx, %rax
jne .L30
addl $1, %esi
addq $20, %rdi
addq $4, %rcx
cmpl $5, %esi
jne .L29
leaq 48(%rsp), %rdi
movl $100, %esi
call cudaMalloc@PLT
leaq 56(%rsp), %rdi
movl $100, %esi
call cudaMalloc@PLT
leaq 64(%rsp), %rdi
movl $100, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $100, %edx
movq 16(%rsp), %rsi
movq 48(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $100, %edx
movq %rbp, %rsi
movq 56(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $100, %edx
movq %r13, %rsi
movq 64(%rsp), %rdi
call cudaMemcpy@PLT
movl $0, %esi
movq 72(%rsp), %rdi
call cudaEventRecord@PLT
movl 108(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 100(%rsp), %rdx
movq 88(%rsp), %rdi
movl 96(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L52
.L32:
movl $0, %esi
movq 80(%rsp), %rdi
call cudaEventRecord@PLT
movq 80(%rsp), %rdi
call cudaEventSynchronize@PLT
movl $2, %ecx
movl $100, %edx
movq 64(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
movq 48(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rdi
call cudaFree@PLT
movq 64(%rsp), %rdi
call cudaFree@PLT
movq %rbp, %rdi
call free@PLT
movl $60, %edi
call malloc@PLT
movq %rax, %r9
movq %rax, 8(%rsp)
movq %r13, %r12
movq %r13, %r8
movl $0, %edi
movl $0, %esi
movl $4, %r11d
leaq 4(%rax), %r10
.L33:
movl %edi, %ebx
movslq %esi, %rcx
leaq (%r9,%rcx,4), %rax
movq %r9, %rbp
movq %r11, %rdx
subq %rdi, %rdx
addq %rcx, %rdx
leaq (%r10,%rdx,4), %rcx
movq %r8, %rdx
.L34:
movss (%rdx), %xmm0
movss %xmm0, (%rax)
addq $20, %rdx
addq $4, %rax
cmpq %rcx, %rax
jne .L34
addl $5, %esi
subl %ebx, %esi
addq $1, %rdi
addq $24, %r8
cmpq $5, %rdi
jne .L33
movq 8(%rsp), %rbx
leaq 60(%rbx), %r14
leaq .LC4(%rip), %r15
.L36:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %r15, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %r14, %rbx
jne .L36
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 100(%r13), %rax
movl $0, %r15d
movq %rbp, 24(%rsp)
movl %r15d, %ebp
movq %r12, %r15
movq %rax, %r12
.L37:
movl $0, %ebx
.L39:
pxor %xmm0, %xmm0
cvtss2sd (%r15,%rbx,4), %xmm0
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
leal 0(%rbp,%rbx), %edx
movslq %edx, %rax
imulq $1717986919, %rax, %rax
sarq $33, %rax
movl %edx, %ecx
sarl $31, %ecx
subl %ecx, %eax
leal (%rax,%rax,4), %eax
subl %eax, %edx
cmpl $3, %edx
movl $32, %edx
movl $10, %eax
cmovg %eax, %edx
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $1, %rbx
cmpq $5, %rbx
jne .L39
addl $5, %ebp
addq $20, %r15
cmpq %r12, %r15
jne .L37
movq 24(%rsp), %rbp
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 44(%rsp), %rdi
movq 80(%rsp), %rdx
movq 72(%rsp), %rsi
call cudaEventElapsedTime@PLT
pxor %xmm0, %xmm0
cvtss2sd 44(%rsp), %xmm0
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
leaq 56(%rsp), %rdi
movl $60, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $60, %edx
movq 8(%rsp), %rsi
movq 56(%rsp), %rdi
call cudaMemcpy@PLT
movl $512, 124(%rsp)
movl $1, 128(%rsp)
movl $1, 112(%rsp)
movl $1, 116(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 124(%rsp), %rdx
movl $1, %ecx
movq 112(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L53
.L41:
movl $2, %ecx
movl $60, %edx
movq 56(%rsp), %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC9(%rip), %rbx
.L42:
pxor %xmm0, %xmm0
cvtss2sd 0(%rbp), %xmm0
movq %rbx, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbp
cmpq %r14, %rbp
jne .L42
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 72(%rsp), %rdi
call cudaEventDestroy@PLT
movq 80(%rsp), %rdi
call cudaEventDestroy@PLT
movq 56(%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
movq 16(%rsp), %rdi
call free@PLT
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L54
movl $0, %eax
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L52:
.cfi_restore_state
movl $5, %ecx
movq 64(%rsp), %rdx
movq 56(%rsp), %rsi
movq 48(%rsp), %rdi
call _Z35__device_stub__Z10multMatrizPfS_S_iPfS_S_i
jmp .L32
.L53:
movl $5, %esi
movq 56(%rsp), %rdi
call _Z37__device_stub__Z16choleskyParaleloPfiPfi
jmp .L41
.L54:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC10:
.string "_Z16choleskyParaleloPfi"
.LC11:
.string "_Z7indicesv"
.LC12:
.string "_Z10multMatrizPfS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2089:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _Z16choleskyParaleloPfi(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC11(%rip), %rdx
movq %rdx, %rcx
leaq _Z7indicesv(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC12(%rip), %rdx
movq %rdx, %rcx
leaq _Z10multMatrizPfS_S_i(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2089:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC0:
.long 0
.long 1075314688
.align 8
.LC1:
.long 0
.long 1040187392
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC2:
.long 1065353216
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "multMatricesCuadradasSol.hip"
.globl _Z25__device_stub__multMatrizPfS_S_i # -- Begin function _Z25__device_stub__multMatrizPfS_S_i
.p2align 4, 0x90
.type _Z25__device_stub__multMatrizPfS_S_i,@function
_Z25__device_stub__multMatrizPfS_S_i: # @_Z25__device_stub__multMatrizPfS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10multMatrizPfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z25__device_stub__multMatrizPfS_S_i, .Lfunc_end0-_Z25__device_stub__multMatrizPfS_S_i
.cfi_endproc
# -- End function
.globl _Z22__device_stub__indicesv # -- Begin function _Z22__device_stub__indicesv
.p2align 4, 0x90
.type _Z22__device_stub__indicesv,@function
_Z22__device_stub__indicesv: # @_Z22__device_stub__indicesv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z7indicesv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end1:
.size _Z22__device_stub__indicesv, .Lfunc_end1-_Z22__device_stub__indicesv
.cfi_endproc
# -- End function
.globl _Z31__device_stub__choleskyParaleloPfi # -- Begin function _Z31__device_stub__choleskyParaleloPfi
.p2align 4, 0x90
.type _Z31__device_stub__choleskyParaleloPfi,@function
_Z31__device_stub__choleskyParaleloPfi: # @_Z31__device_stub__choleskyParaleloPfi
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z16choleskyParaleloPfi, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end2:
.size _Z31__device_stub__choleskyParaleloPfi, .Lfunc_end2-_Z31__device_stub__choleskyParaleloPfi
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI3_0:
.quad 0x4018000000000000 # double 6
.LCPI3_1:
.quad 0x3e00000000000000 # double 4.6566128730773926E-10
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0
.LCPI3_2:
.long 0x3f800000 # float 1
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $200, %rsp
.cfi_def_cfa_offset 256
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
xorl %r12d, %r12d
xorl %edi, %edi
callq time
movl %eax, %edi
callq srand
leaq 32(%rsp), %rdi
callq hipEventCreate
leaq 16(%rsp), %rdi
callq hipEventCreate
movl $100, %edi
callq malloc
movq %rax, %rbx
movl $100, %edi
callq malloc
movq %rax, %r15
movl $100, %edi
callq malloc
movq %rax, %r14
.p2align 4, 0x90
.LBB3_1: # =>This Inner Loop Header: Depth=1
callq rand
xorps %xmm0, %xmm0
cvtsi2sd %eax, %xmm0
mulsd .LCPI3_0(%rip), %xmm0
mulsd .LCPI3_1(%rip), %xmm0
cvttpd2dq %xmm0, %xmm0
cvtdq2ps %xmm0, %xmm0
addss .LCPI3_2(%rip), %xmm0
movss %xmm0, (%rbx,%r12,4)
movl $0, (%r14,%r12,4)
incq %r12
cmpq $25, %r12
jne .LBB3_1
# %bb.2: # %.preheader113.preheader
xorl %eax, %eax
movq %rbx, %rcx
movq %r15, %rdx
.p2align 4, 0x90
.LBB3_3: # %.preheader113
# =>This Loop Header: Depth=1
# Child Loop BB3_4 Depth 2
movq %rcx, %rsi
xorl %edi, %edi
.p2align 4, 0x90
.LBB3_4: # Parent Loop BB3_3 Depth=1
# => This Inner Loop Header: Depth=2
movss (%rsi), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss %xmm0, (%rdx,%rdi,4)
incq %rdi
addq $20, %rsi
cmpq $5, %rdi
jne .LBB3_4
# %bb.5: # in Loop: Header=BB3_3 Depth=1
incq %rax
addq $20, %rdx
addq $4, %rcx
cmpq $5, %rax
jne .LBB3_3
# %bb.6:
leaq 40(%rsp), %rdi
movl $100, %r12d
movl $100, %esi
callq hipMalloc
leaq 8(%rsp), %rdi
movl $100, %esi
callq hipMalloc
leaq 24(%rsp), %rdi
movl $100, %esi
callq hipMalloc
movq 40(%rsp), %rdi
movl $100, %edx
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
movl $100, %edx
movq %r15, %rsi
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
movl $100, %edx
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movq 32(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movabsq $68719476752, %rdi # imm = 0x1000000010
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_8
# %bb.7:
movq 40(%rsp), %rax
movq 8(%rsp), %rcx
movq 24(%rsp), %rdx
movq %rax, 112(%rsp)
movq %rcx, 72(%rsp)
movq %rdx, 64(%rsp)
movl $5, 124(%rsp)
leaq 112(%rsp), %rax
movq %rax, 128(%rsp)
leaq 72(%rsp), %rax
movq %rax, 136(%rsp)
leaq 64(%rsp), %rax
movq %rax, 144(%rsp)
leaq 124(%rsp), %rax
movq %rax, 152(%rsp)
leaq 96(%rsp), %rdi
leaq 80(%rsp), %rsi
leaq 56(%rsp), %rdx
leaq 48(%rsp), %rcx
callq __hipPopCallConfiguration
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
movq 80(%rsp), %rcx
movl 88(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z10multMatrizPfS_S_i, %edi
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_8:
movq 16(%rsp), %rdi
xorl %r13d, %r13d
xorl %esi, %esi
callq hipEventRecord
movq 16(%rsp), %rdi
callq hipEventSynchronize
movq 24(%rsp), %rsi
movl $100, %edx
movq %r14, %rdi
movl $2, %ecx
callq hipMemcpy
movq 40(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq %r15, %rdi
callq free
movl $60, %edi
callq malloc
movq %rax, %r15
movq %r14, %rax
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB3_9: # %.preheader112
# =>This Loop Header: Depth=1
# Child Loop BB3_10 Depth 2
movslq %ecx, %rcx
xorl %edx, %edx
.p2align 4, 0x90
.LBB3_10: # Parent Loop BB3_9 Depth=1
# => This Inner Loop Header: Depth=2
movss (%rax,%rdx), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss %xmm0, (%r15,%rcx,4)
incq %rcx
addq $20, %rdx
cmpq %rdx, %r12
jne .LBB3_10
# %bb.11: # in Loop: Header=BB3_9 Depth=1
incq %r13
addq $24, %rax
addq $-20, %r12
cmpq $5, %r13
jne .LBB3_9
# %bb.12: # %.preheader111.preheader
movq %rbx, 176(%rsp) # 8-byte Spill
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB3_13: # %.preheader111
# =>This Inner Loop Header: Depth=1
movss (%r15,%rbx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %edi
movb $1, %al
callq printf
incq %rbx
cmpq $15, %rbx
jne .LBB3_13
# %bb.14:
movq %r15, 160(%rsp) # 8-byte Spill
movl $.Lstr.3, %edi
callq puts@PLT
xorl %eax, %eax
movl $3435973837, %r13d # imm = 0xCCCCCCCD
movl $32, %ebp
movq %r14, 168(%rsp) # 8-byte Spill
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB3_15: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB3_16 Depth 2
movq %rcx, 184(%rsp) # 8-byte Spill
movq %rax, 192(%rsp) # 8-byte Spill
movl %eax, %r12d
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB3_16: # Parent Loop BB3_15 Depth=1
# => This Inner Loop Header: Depth=2
movl %r12d, %eax
imulq %r13, %rax
shrq $34, %rax
leal (%rax,%rax,4), %eax
movl %r12d, %r15d
subl %eax, %r15d
movss (%r14,%rbx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.2, %edi
movb $1, %al
callq printf
cmpl $4, %r15d
movl $10, %edi
cmovbl %ebp, %edi
callq putchar@PLT
incq %rbx
incl %r12d
cmpq $5, %rbx
jne .LBB3_16
# %bb.17: # in Loop: Header=BB3_15 Depth=1
movq 184(%rsp), %rcx # 8-byte Reload
incq %rcx
movq 192(%rsp), %rax # 8-byte Reload
addl $5, %eax
addq $20, %r14
cmpq $5, %rcx
jne .LBB3_15
# %bb.18:
movl $.Lstr.3, %edi
callq puts@PLT
movq 32(%rsp), %rsi
movq 16(%rsp), %rdx
leaq 48(%rsp), %rdi
callq hipEventElapsedTime
movss 48(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.4, %edi
movb $1, %al
callq printf
leaq 8(%rsp), %rdi
movl $60, %esi
callq hipMalloc
movq 8(%rsp), %rdi
movl $60, %edx
movq 160(%rsp), %r15 # 8-byte Reload
movq %r15, %rsi
movl $1, %ecx
callq hipMemcpy
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 511(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_20
# %bb.19:
movq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $5, 56(%rsp)
leaq 112(%rsp), %rax
movq %rax, 128(%rsp)
leaq 56(%rsp), %rax
movq %rax, 136(%rsp)
leaq 96(%rsp), %rdi
leaq 80(%rsp), %rsi
leaq 72(%rsp), %rdx
leaq 64(%rsp), %rcx
callq __hipPopCallConfiguration
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
movq 80(%rsp), %rcx
movl 88(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z16choleskyParaleloPfi, %edi
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
pushq 80(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_20:
movq 8(%rsp), %rsi
movl $60, %edx
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
movl $.Lstr.3, %edi
callq puts@PLT
xorl %ebx, %ebx
movq 168(%rsp), %r14 # 8-byte Reload
.p2align 4, 0x90
.LBB3_21: # =>This Inner Loop Header: Depth=1
movss (%r15,%rbx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.5, %edi
movb $1, %al
callq printf
incq %rbx
cmpq $15, %rbx
jne .LBB3_21
# %bb.22:
movl $.Lstr.3, %edi
callq puts@PLT
movq 32(%rsp), %rdi
callq hipEventDestroy
movq 16(%rsp), %rdi
callq hipEventDestroy
movq 8(%rsp), %rdi
callq hipFree
movq %r15, %rdi
callq free
movq %r14, %rdi
callq free
movq 176(%rsp), %rdi # 8-byte Reload
callq free
xorl %eax, %eax
addq $200, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10multMatrizPfS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z7indicesv, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16choleskyParaleloPfi, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z10multMatrizPfS_S_i,@object # @_Z10multMatrizPfS_S_i
.section .rodata,"a",@progbits
.globl _Z10multMatrizPfS_S_i
.p2align 3, 0x0
_Z10multMatrizPfS_S_i:
.quad _Z25__device_stub__multMatrizPfS_S_i
.size _Z10multMatrizPfS_S_i, 8
.type _Z7indicesv,@object # @_Z7indicesv
.globl _Z7indicesv
.p2align 3, 0x0
_Z7indicesv:
.quad _Z22__device_stub__indicesv
.size _Z7indicesv, 8
.type _Z16choleskyParaleloPfi,@object # @_Z16choleskyParaleloPfi
.globl _Z16choleskyParaleloPfi
.p2align 3, 0x0
_Z16choleskyParaleloPfi:
.quad _Z31__device_stub__choleskyParaleloPfi
.size _Z16choleskyParaleloPfi, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%5.0f "
.size .L.str, 7
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "%05.0f"
.size .L.str.2, 7
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Tiempo %4.6f milseg\n\n"
.size .L.str.4, 22
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "%4.4f "
.size .L.str.5, 7
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z10multMatrizPfS_S_i"
.size .L__unnamed_1, 22
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z7indicesv"
.size .L__unnamed_2, 12
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z16choleskyParaleloPfi"
.size .L__unnamed_3, 24
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr.3,@object # @str.3
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr.3:
.asciz "\n"
.size .Lstr.3, 2
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__multMatrizPfS_S_i
.addrsig_sym _Z22__device_stub__indicesv
.addrsig_sym _Z31__device_stub__choleskyParaleloPfi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z10multMatrizPfS_S_i
.addrsig_sym _Z7indicesv
.addrsig_sym _Z16choleskyParaleloPfi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /*
Reference: http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.
html#ixzz4CtH09yed
*/
#include <cstdlib>
#include <ctime>
#include <cstdio>
#include <iostream>
using namespace std;
// Generate random floats between 0 and UP_BOUND
#define UP_BOUND 100;
// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.stride + col)
typedef struct {
int width;
int height;
int stride;
float* elements;
} Matrix;
// Thread block size
#define BLOCK_SIZE 20
// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
// Get a matrix element
__device__ float GetElement(const Matrix A, int row, int col)
{
return A.elements[row * A.stride + col];
}
// Set a matrix element
__device__ void SetElement(Matrix A, int row, int col,
float value)
{
A.elements[row * A.stride + col] = value;
}
// Get the BLOCK_SIZExBLOCK_SIZE sub-matrix Asub of A that is
// located col sub-matrices to the right and row sub-matrices down
// from the upper-left corner of A
__device__ Matrix GetSubMatrix(Matrix A, int row, int col)
{
Matrix Asub;
Asub.width = BLOCK_SIZE;
Asub.height = BLOCK_SIZE;
Asub.stride = A.stride;
Asub.elements = &A.elements[A.stride * BLOCK_SIZE * row
+ BLOCK_SIZE * col];
return Asub;
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
// Load A and B to device memory
Matrix d_A;
d_A.width = d_A.stride = A.width;
d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
cudaError_t err = cudaMalloc(&d_A.elements, size);
cout << "CUDA malloc A: " << cudaGetErrorString(err) << endl;
err = cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
cout << "Copy A to device: " << cudaGetErrorString(err) << "\n" << endl;
Matrix d_B;
d_B.width = d_B.stride = B.width;
d_B.height = B.height;
size = B.width * B.height * sizeof(float);
err = cudaMalloc(&d_B.elements, size);
cout << "CUDA malloc B: " << cudaGetErrorString(err) << endl;
err = cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);
cout << "Copy B to device: " << cudaGetErrorString(err) << "\n" << endl;
// Allocate C in device memory
Matrix d_C;
d_C.width = d_C.stride = C.width;
d_C.height = C.height;
size = C.width * C.height * sizeof(float);
err = cudaMalloc(&d_C.elements, size);
cout << "CUDA malloc C: " << cudaGetErrorString(err) << endl;
// Invoke kernel
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
err = cudaThreadSynchronize();
cout << "Run kernel: " << cudaGetErrorString(err) << endl;
// Read C from device memory
err = cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
cout << "Copy C off of device: " << cudaGetErrorString(err) << "\n" << endl;
// Free device memory
cudaFree(d_A.elements);
cudaFree(d_B.elements);
cudaFree(d_C.elements);
}
// Matrix multiplication kernel called by MatMul()
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
// Shared memory used to store Asub and Bsub respectively
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
// Each thread computes one element of Csub
// by accumulating results into Cvalue
float Cvalue = 0.0;
for (int i = 0; i < (A.width - 1)/BLOCK_SIZE + 1; ++i) {
int temp = i * BLOCK_SIZE + threadIdx.x;
if (row < A.height && temp < A.width)
As[threadIdx.y][threadIdx.x] = A.elements[row * A.width + temp];
else
As[threadIdx.y][threadIdx.x] = 0.0;
temp = i * BLOCK_SIZE + threadIdx.y;
if (col < B.width && temp < B.height)
Bs[threadIdx.y][threadIdx.x] = B.elements[temp * B.width + col];
else
Bs[threadIdx.y][threadIdx.x] = 0.0;
__syncthreads();
for (int j = 0; j < BLOCK_SIZE; ++j)
Cvalue += As[threadIdx.y][j] * Bs[j][threadIdx.x];
__syncthreads();
}
if (row < C.height && col < C.width)
C.elements[row * C.width + col] = Cvalue;
/*---Original code from CUDA C Programming Guide---*/
/*
// Block row and column
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
// Each thread block computes one sub-matrix Csub of C
Matrix Csub = GetSubMatrix(C, blockRow, blockCol);
// Each thread computes one element of Csub
// by accumulating results into Cvalue
float Cvalue = 0;
// Thread row and column within Csub
int row = threadIdx.y;
int col = threadIdx.x;
// Loop over all the sub-matrices of A and B that are
// required to compute Csub
// Multiply each pair of sub-matrices together
// and accumulate the results
for (int m = 0; m < (A.width / BLOCK_SIZE); ++m) {
// Get sub-matrix Asub of A
Matrix Asub = GetSubMatrix(A, blockRow, m);
// Get sub-matrix Bsub of B
Matrix Bsub = GetSubMatrix(B, m, blockCol);
// Shared memory used to store Asub and Bsub respectively
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load Asub and Bsub from device memory to shared memory
// Each thread loads one element of each sub-matrix
As[row][col] = GetElement(Asub, row, col);
Bs[row][col] = GetElement(Bsub, row, col);
// Synchronize to make sure the sub-matrices are loaded
// before starting the computation
__syncthreads();
// Multiply Asub and Bsub together
for (int e = 0; e < BLOCK_SIZE; ++e)
Cvalue += As[row][e] * Bs[e][col];
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write Csub to device memory
// Each thread writes one element
SetElement(Csub, row, col, Cvalue);
*/
}
int main(int argc, char const *argv[])
{
clock_t t;
Matrix A, B, C;
int a1, a2, b1, b2;
int i, j;
srand(time(NULL));
if (argc < 4)
cout << "Usage: ./accuracy.o A.height A.width B.width" << endl;
// Get dimensions of A and B
// Run $ ./matrixMul 1 1000000 400
a1 = atoi(argv[1]); // A's height
a2 = atoi(argv[2]); // A's width
b1 = a2; // B's height
b2 = atoi(argv[3]); // B's width
A.height = a1;
A.width = A.stride = a2;
A.elements = new float[A.width * A.height];
B.height = b1;
B.width = B.stride = b2;
B.elements = new float[B.width * B. height];
C.height = A.height;
C.width = C.stride = B.width;
C.elements = new float[C.width * C.height];
// Fill A and B with random floats
for (i = 0; i < A.height; ++i)
for (j = 0; j < A.width; ++j)
A.elements[i * A.width + j] = ((float)rand() / (float)RAND_MAX) * UP_BOUND;
for (i = 0; i < B.height; ++i)
for (j = 0; j < B.width; ++j)
B.elements[i * B.width + j] = ((float)rand() / (float)RAND_MAX) * UP_BOUND;
// Call MatMul(), and therefore MatMulKernel()
t = clock();
MatMul(A, B, C);
// Print time multiplication took
t = clock() - t;
cout << "It took me " << fixed << ((float)t)/CLOCKS_PER_SEC;
cout << " seconds.\n" << endl;
// Print A, B, and C
for (i = 0; i < min(10, A.height); ++i) {
for (j = 0; j < min(10, A.width); ++j)
cout << fixed << A.elements[i * A.width + j] << "\t";
cout << endl;
}
cout << endl;
for (i = 0; i < min(10, B.height); ++i) {
for (j = 0; j < min(10, B.width); ++j)
cout << fixed << B.elements[i * B.width + j] << "\t";
cout << endl;
}
cout << endl;
for (i = 0; i < min(10, C.height); ++i) {
for (j = 0; j < min(10, C.width); ++j)
cout << fixed << C.elements[i * C.width + j] << "\t";
cout << endl;
}
cout << endl;
delete[] A.elements;
delete[] B.elements;
delete[] C.elements;
return 0;
} | code for sm_80
Function : _Z12MatMulKernel6MatrixS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R9, SR_CTAID.X ; /* 0x0000000000097919 */
/* 0x000e220000002500 */
/*0020*/ MOV R2, c[0x0][0x160] ; /* 0x0000580000027a02 */
/* 0x000fe20000000f00 */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe20000000a00 */
/*0040*/ HFMA2.MMA R21, -RZ, RZ, 0, 0 ; /* 0x00000000ff157435 */
/* 0x000fe200000001ff */
/*0050*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e220000002100 */
/*0060*/ ISETP.GE.AND P1, PT, R2, -0x12, PT ; /* 0xffffffee0200780c */
/* 0x000fc60003f26270 */
/*0070*/ S2R R19, SR_CTAID.Y ; /* 0x0000000000137919 */
/* 0x000e680000002600 */
/*0080*/ S2R R3, SR_TID.Y ; /* 0x0000000000037919 */
/* 0x000e620000002200 */
/*0090*/ IMAD R18, R9, 0x14, R0 ; /* 0x0000001409127824 */
/* 0x001fca00078e0200 */
/*00a0*/ ISETP.GE.AND P0, PT, R18, c[0x0][0x190], PT ; /* 0x0000640012007a0c */
/* 0x000fe20003f06270 */
/*00b0*/ IMAD R19, R19, 0x14, R3 ; /* 0x0000001413137824 */
/* 0x002fca00078e0203 */
/*00c0*/ ISETP.GE.OR P0, PT, R19, c[0x0][0x194], P0 ; /* 0x0000650013007a0c */
/* 0x000fe20000706670 */
/*00d0*/ @!P1 BRA 0x600 ; /* 0x0000052000009947 */
/* 0x000fd80003800000 */
/*00e0*/ IADD3 R2, R2, -0x1, RZ ; /* 0xffffffff02027810 */
/* 0x000fe20007ffe0ff */
/*00f0*/ IMAD R6, R19, c[0x0][0x160], R0 ; /* 0x0000580013067a24 */
/* 0x000fe200078e0200 */
/*0100*/ MOV R7, 0x4 ; /* 0x0000000400077802 */
/* 0x000fe20000000f00 */
/*0110*/ UMOV UR4, 0xffffffff ; /* 0xffffffff00047882 */
/* 0x000fe20000000000 */
/*0120*/ MOV R21, RZ ; /* 0x000000ff00157202 */
/* 0x000fe20000000f00 */
/*0130*/ IMAD.HI R4, R2, 0x66666667, RZ ; /* 0x6666666702047827 */
/* 0x000fe200078e02ff */
/*0140*/ LEA R16, R0, 0x640, 0x2 ; /* 0x0000064000107811 */
/* 0x000fc600078e10ff */
/*0150*/ IMAD R2, R3, c[0x0][0x178], R0 ; /* 0x00005e0003027a24 */
/* 0x000fe200078e0200 */
/*0160*/ SHF.R.U32.HI R5, RZ, 0x1f, R4 ; /* 0x0000001fff057819 */
/* 0x000fe20000011604 */
/*0170*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc600078e0207 */
/*0180*/ LEA.HI.SX32 R4, R4, R5, 0x1d ; /* 0x0000000504047211 */
/* 0x000fe200078feaff */
/*0190*/ IMAD R5, R3, 0x50, RZ ; /* 0x0000005003057824 */
/* 0x000fe400078e02ff */
/*01a0*/ IMAD R2, R9, 0x14, R2 ; /* 0x0000001409027824 */
/* 0x000fc600078e0202 */
/*01b0*/ LEA R17, R0, R5, 0x2 ; /* 0x0000000500117211 */
/* 0x000fe400078e10ff */
/*01c0*/ ISETP.GE.AND P1, PT, R3, c[0x0][0x17c], PT ; /* 0x00005f0003007a0c */
/* 0x000fe20003f26270 */
/*01d0*/ HFMA2.MMA R26, -RZ, RZ, 0, 0 ; /* 0x00000000ff1a7435 */
/* 0x000fe200000001ff */
/*01e0*/ ISETP.GE.AND P2, PT, R0, c[0x0][0x160], PT ; /* 0x0000580000007a0c */
/* 0x000fe40003f46270 */
/*01f0*/ ISETP.GE.OR P1, PT, R18, c[0x0][0x178], P1 ; /* 0x00005e0012007a0c */
/* 0x000fe40000f26670 */
/*0200*/ ISETP.GE.OR P2, PT, R19, c[0x0][0x164], P2 ; /* 0x0000590013007a0c */
/* 0x000fe40001746670 */
/*0210*/ MOV R28, RZ ; /* 0x000000ff001c7202 */
/* 0x000fd20000000f00 */
/*0220*/ @!P1 MOV R25, 0x4 ; /* 0x0000000400199802 */
/* 0x000fe40000000f00 */
/*0230*/ @!P2 LDG.E R26, [R6.64] ; /* 0x00000006061aa981 */
/* 0x0000a6000c1e1900 */
/*0240*/ @!P1 IMAD.WIDE R24, R2, R25, c[0x0][0x188] ; /* 0x0000620002189625 */
/* 0x000fca00078e0219 */
/*0250*/ @!P1 LDG.E R28, [R24.64] ; /* 0x00000006181c9981 */
/* 0x000ee2000c1e1900 */
/*0260*/ UIADD3 UR4, UR4, 0x1, URZ ; /* 0x0000000104047890 */
/* 0x000fcc000fffe03f */
/*0270*/ ISETP.LE.AND P1, PT, R4, UR4, PT ; /* 0x0000000404007c0c */
/* 0x000fe4000bf23270 */
/*0280*/ IADD3 R6, P2, R6, 0x50, RZ ; /* 0x0000005006067810 */
/* 0x001fe40007f5e0ff */
/*0290*/ IADD3 R3, R3, 0x14, RZ ; /* 0x0000001403037810 */
/* 0x000fe40007ffe0ff */
/*02a0*/ IADD3 R0, R0, 0x14, RZ ; /* 0x0000001400007810 */
/* 0x000fe40007ffe0ff */
/*02b0*/ IADD3.X R7, RZ, R7, RZ, P2, !PT ; /* 0x00000007ff077210 */
/* 0x000fe200017fe4ff */
/*02c0*/ STS [R17], R26 ; /* 0x0000001a11007388 */
/* 0x004fe80000000800 */
/*02d0*/ STS [R17+0x640], R28 ; /* 0x0006401c11007388 */
/* 0x008fe80000000800 */
/*02e0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*02f0*/ LDS R29, [R16] ; /* 0x00000000101d7984 */
/* 0x000fe80000000800 */
/*0300*/ LDS.128 R8, [R5] ; /* 0x0000000005087984 */
/* 0x000e280000000c00 */
/*0310*/ LDS R26, [R16+0x50] ; /* 0x00005000101a7984 */
/* 0x000e680000000800 */
/*0320*/ LDS R27, [R16+0xa0] ; /* 0x0000a000101b7984 */
/* 0x000ea80000000800 */
/*0330*/ LDS R20, [R16+0xf0] ; /* 0x0000f00010147984 */
/* 0x000ee80000000800 */
/*0340*/ LDS R23, [R16+0x140] ; /* 0x0001400010177984 */
/* 0x000fe80000000800 */
/*0350*/ LDS.128 R12, [R5+0x10] ; /* 0x00001000050c7984 */
/* 0x000f280000000c00 */
/*0360*/ LDS R22, [R16+0x190] ; /* 0x0001900010167984 */
/* 0x000f680000000800 */
/*0370*/ LDS R25, [R16+0x1e0] ; /* 0x0001e00010197984 */
/* 0x000f680000000800 */
/*0380*/ LDS R24, [R16+0x230] ; /* 0x0002300010187984 */
/* 0x000f620000000800 */
/*0390*/ FFMA R8, R29, R8, R21 ; /* 0x000000081d087223 */
/* 0x001fc60000000015 */
/*03a0*/ LDS R21, [R16+0x280] ; /* 0x0002800010157984 */
/* 0x000fe20000000800 */
/*03b0*/ FFMA R8, R26, R9, R8 ; /* 0x000000091a087223 */
/* 0x002fc80000000008 */
/*03c0*/ FFMA R8, R27, R10, R8 ; /* 0x0000000a1b087223 */
/* 0x004fc80000000008 */
/*03d0*/ FFMA R26, R20, R11, R8 ; /* 0x0000000b141a7223 */
/* 0x008fe40000000008 */
/*03e0*/ LDS.128 R8, [R5+0x20] ; /* 0x0000200005087984 */
/* 0x000e280000000c00 */
/*03f0*/ LDS R20, [R16+0x2d0] ; /* 0x0002d00010147984 */
/* 0x000e620000000800 */
/*0400*/ FFMA R12, R23, R12, R26 ; /* 0x0000000c170c7223 */
/* 0x010fc6000000001a */
/*0410*/ LDS R23, [R16+0x320] ; /* 0x0003200010177984 */
/* 0x000ea20000000800 */
/*0420*/ FFMA R12, R22, R13, R12 ; /* 0x0000000d160c7223 */
/* 0x020fc6000000000c */
/*0430*/ LDS R22, [R16+0x370] ; /* 0x0003700010167984 */
/* 0x000ee20000000800 */
/*0440*/ FFMA R12, R25, R14, R12 ; /* 0x0000000e190c7223 */
/* 0x000fc6000000000c */
/*0450*/ LDS R25, [R16+0x3c0] ; /* 0x0003c00010197984 */
/* 0x000fe20000000800 */
/*0460*/ FFMA R24, R24, R15, R12 ; /* 0x0000000f18187223 */
/* 0x000fc6000000000c */
/*0470*/ LDS.128 R12, [R5+0x30] ; /* 0x00003000050c7984 */
/* 0x000f220000000c00 */
/*0480*/ FFMA R8, R21, R8, R24 ; /* 0x0000000815087223 */
/* 0x001fc60000000018 */
/*0490*/ LDS R24, [R16+0x410] ; /* 0x0004100010187984 */
/* 0x000e220000000800 */
/*04a0*/ FFMA R8, R20, R9, R8 ; /* 0x0000000914087223 */
/* 0x002fc60000000008 */
/*04b0*/ LDS R21, [R16+0x460] ; /* 0x0004600010157984 */
/* 0x000e620000000800 */
/*04c0*/ FFMA R8, R23, R10, R8 ; /* 0x0000000a17087223 */
/* 0x004fc60000000008 */
/*04d0*/ LDS R20, [R16+0x4b0] ; /* 0x0004b00010147984 */
/* 0x000ea20000000800 */
/*04e0*/ FFMA R26, R22, R11, R8 ; /* 0x0000000b161a7223 */
/* 0x008fc60000000008 */
/*04f0*/ LDS R23, [R16+0x500] ; /* 0x0005000010177984 */
/* 0x000fe80000000800 */
/*0500*/ LDS.128 R8, [R5+0x40] ; /* 0x0000400005087984 */
/* 0x000ee80000000c00 */
/*0510*/ LDS R22, [R16+0x550] ; /* 0x0005500010167984 */
/* 0x000f620000000800 */
/*0520*/ FFMA R26, R25, R12, R26 ; /* 0x0000000c191a7223 */
/* 0x010fc6000000001a */
/*0530*/ LDS R25, [R16+0x5a0] ; /* 0x0005a00010197984 */
/* 0x000f280000000800 */
/*0540*/ LDS R12, [R16+0x5f0] ; /* 0x0005f000100c7984 */
/* 0x000f220000000800 */
/*0550*/ FFMA R13, R24, R13, R26 ; /* 0x0000000d180d7223 */
/* 0x001fc8000000001a */
/*0560*/ FFMA R13, R21, R14, R13 ; /* 0x0000000e150d7223 */
/* 0x002fc8000000000d */
/*0570*/ FFMA R13, R20, R15, R13 ; /* 0x0000000f140d7223 */
/* 0x004fc8000000000d */
/*0580*/ FFMA R8, R23, R8, R13 ; /* 0x0000000817087223 */
/* 0x008fc8000000000d */
/*0590*/ FFMA R8, R22, R9, R8 ; /* 0x0000000916087223 */
/* 0x020fe20000000008 */
/*05a0*/ HFMA2.MMA R9, -RZ, RZ, 0, 1.1920928955078125e-06 ; /* 0x00000014ff097435 */
/* 0x000fc600000001ff */
/*05b0*/ FFMA R8, R25, R10, R8 ; /* 0x0000000a19087223 */
/* 0x010fc80000000008 */
/*05c0*/ FFMA R21, R12, R11, R8 ; /* 0x0000000b0c157223 */
/* 0x000fc60000000008 */
/*05d0*/ IMAD R2, R9, c[0x0][0x178], R2 ; /* 0x00005e0009027a24 */
/* 0x000fe200078e0202 */
/*05e0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*05f0*/ @!P1 BRA 0x1c0 ; /* 0xfffffbc000009947 */
/* 0x000fea000383ffff */
/*0600*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0610*/ MOV R3, 0x4 ; /* 0x0000000400037802 */
/* 0x000fe20000000f00 */
/*0620*/ IMAD R2, R19, c[0x0][0x190], R18 ; /* 0x0000640013027a24 */
/* 0x000fc800078e0212 */
/*0630*/ IMAD.WIDE R2, R2, R3, c[0x0][0x1a0] ; /* 0x0000680002027625 */
/* 0x000fca00078e0203 */
/*0640*/ STG.E [R2.64], R21 ; /* 0x0000001502007986 */
/* 0x000fe2000c101906 */
/*0650*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0660*/ BRA 0x660; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0670*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0680*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0690*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /*
Reference: http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.
html#ixzz4CtH09yed
*/
#include <cstdlib>
#include <ctime>
#include <cstdio>
#include <iostream>
using namespace std;
// Generate random floats between 0 and UP_BOUND
#define UP_BOUND 100;
// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.stride + col)
typedef struct {
int width;
int height;
int stride;
float* elements;
} Matrix;
// Thread block size
#define BLOCK_SIZE 20
// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
// Get a matrix element
__device__ float GetElement(const Matrix A, int row, int col)
{
return A.elements[row * A.stride + col];
}
// Set a matrix element
__device__ void SetElement(Matrix A, int row, int col,
float value)
{
A.elements[row * A.stride + col] = value;
}
// Get the BLOCK_SIZExBLOCK_SIZE sub-matrix Asub of A that is
// located col sub-matrices to the right and row sub-matrices down
// from the upper-left corner of A
__device__ Matrix GetSubMatrix(Matrix A, int row, int col)
{
Matrix Asub;
Asub.width = BLOCK_SIZE;
Asub.height = BLOCK_SIZE;
Asub.stride = A.stride;
Asub.elements = &A.elements[A.stride * BLOCK_SIZE * row
+ BLOCK_SIZE * col];
return Asub;
}
// Matrix multiplication - Host code
// Copies A and B to device memory, launches MatMulKernel, and copies the
// product back into C.elements. Each CUDA API result is echoed to stdout.
// The kernel bounds-checks every access, so matrix dimensions do NOT need
// to be multiples of BLOCK_SIZE — the grid is rounded up accordingly.
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
    // Load A and B to device memory
    Matrix d_A;
    d_A.width = d_A.stride = A.width;
    d_A.height = A.height;
    size_t size = A.width * A.height * sizeof(float);
    cudaError_t err = cudaMalloc(&d_A.elements, size);
    cout << "CUDA malloc A: " << cudaGetErrorString(err) << endl;
    err = cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
    cout << "Copy A to device: " << cudaGetErrorString(err) << "\n" << endl;
    Matrix d_B;
    d_B.width = d_B.stride = B.width;
    d_B.height = B.height;
    size = B.width * B.height * sizeof(float);
    err = cudaMalloc(&d_B.elements, size);
    cout << "CUDA malloc B: " << cudaGetErrorString(err) << endl;
    err = cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);
    cout << "Copy B to device: " << cudaGetErrorString(err) << "\n" << endl;
    // Allocate C in device memory
    Matrix d_C;
    d_C.width = d_C.stride = C.width;
    d_C.height = C.height;
    size = C.width * C.height * sizeof(float);
    err = cudaMalloc(&d_C.elements, size);
    cout << "CUDA malloc C: " << cudaGetErrorString(err) << endl;
    // Invoke kernel
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    // Ceil-divide: the previous truncating division silently dropped the
    // tail rows/columns whenever a dimension was not a multiple of
    // BLOCK_SIZE, leaving part of C uncomputed.
    dim3 dimGrid((B.width  + dimBlock.x - 1) / dimBlock.x,
                 (A.height + dimBlock.y - 1) / dimBlock.y);
    MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
    // Launch-configuration errors surface via cudaGetLastError();
    // execution errors surface at the synchronize. cudaDeviceSynchronize
    // replaces the long-deprecated cudaThreadSynchronize.
    err = cudaGetLastError();
    if (err == cudaSuccess)
        err = cudaDeviceSynchronize();
    cout << "Run kernel: " << cudaGetErrorString(err) << endl;
    // Read C from device memory
    err = cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
    cout << "Copy C off of device: " << cudaGetErrorString(err) << "\n" << endl;
    // Free device memory
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
}
// Matrix multiplication kernel called by MatMul()
// Each thread computes one element C[row][col]; the thread block
// cooperatively stages BLOCK_SIZE x BLOCK_SIZE tiles of A and B through
// shared memory. Expects a 2D launch with blockDim == (BLOCK_SIZE,
// BLOCK_SIZE); arbitrary matrix sizes are handled by the zero-padding
// loads and the guarded store below.
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
    // Shared memory used to store Asub and Bsub respectively
    __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
    // Global row/column of the output element this thread owns.
    int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    // Each thread computes one element of Csub
    // by accumulating results into Cvalue
    float Cvalue = 0.0;
    // (A.width - 1)/BLOCK_SIZE + 1 == ceil(A.width / BLOCK_SIZE), so a
    // partial trailing tile is still processed.
    for (int i = 0; i < (A.width - 1)/BLOCK_SIZE + 1; ++i) {
        int temp = i * BLOCK_SIZE + threadIdx.x;
        // Lanes that fall outside A load 0 so they contribute nothing
        // to the dot product.
        if (row < A.height && temp < A.width)
            As[threadIdx.y][threadIdx.x] = A.elements[row * A.width + temp];
        else
            As[threadIdx.y][threadIdx.x] = 0.0;
        temp = i * BLOCK_SIZE + threadIdx.y;
        if (col < B.width && temp < B.height)
            Bs[threadIdx.y][threadIdx.x] = B.elements[temp * B.width + col];
        else
            Bs[threadIdx.y][threadIdx.x] = 0.0;
        // Barrier: both tiles must be fully loaded before any thread
        // starts multiplying.
        __syncthreads();
        for (int j = 0; j < BLOCK_SIZE; ++j)
            Cvalue += As[threadIdx.y][j] * Bs[j][threadIdx.x];
        // Barrier: finish consuming this tile before the next iteration
        // overwrites the shared arrays.
        __syncthreads();
    }
    // Guarded store: threads mapped past the edge of C write nothing.
    if (row < C.height && col < C.width)
        C.elements[row * C.width + col] = Cvalue;
    /*---Original code from CUDA C Programming Guide---*/
    /*
    // Block row and column
    int blockRow = blockIdx.y;
    int blockCol = blockIdx.x;
    // Each thread block computes one sub-matrix Csub of C
    Matrix Csub = GetSubMatrix(C, blockRow, blockCol);
    // Each thread computes one element of Csub
    // by accumulating results into Cvalue
    float Cvalue = 0;
    // Thread row and column within Csub
    int row = threadIdx.y;
    int col = threadIdx.x;
    // Loop over all the sub-matrices of A and B that are
    // required to compute Csub
    // Multiply each pair of sub-matrices together
    // and accumulate the results
    for (int m = 0; m < (A.width / BLOCK_SIZE); ++m) {
        // Get sub-matrix Asub of A
        Matrix Asub = GetSubMatrix(A, blockRow, m);
        // Get sub-matrix Bsub of B
        Matrix Bsub = GetSubMatrix(B, m, blockCol);
        // Shared memory used to store Asub and Bsub respectively
        __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
        __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
        // Load Asub and Bsub from device memory to shared memory
        // Each thread loads one element of each sub-matrix
        As[row][col] = GetElement(Asub, row, col);
        Bs[row][col] = GetElement(Bsub, row, col);
        // Synchronize to make sure the sub-matrices are loaded
        // before starting the computation
        __syncthreads();
        // Multiply Asub and Bsub together
        for (int e = 0; e < BLOCK_SIZE; ++e)
            Cvalue += As[row][e] * Bs[e][col];
        // Synchronize to make sure that the preceding
        // computation is done before loading two new
        // sub-matrices of A and B in the next iteration
        __syncthreads();
    }
    // Write Csub to device memory
    // Each thread writes one element
    SetElement(Csub, row, col, Cvalue);
    */
}
// Driver: reads A.height, A.width, B.width from the command line, fills
// A and B with random floats in [0, UP_BOUND], multiplies them on the
// GPU via MatMul(), reports wall-clock time, and prints the upper-left
// (at most 10x10) corner of A, B, and C.
int main(int argc, char const *argv[])
{
    clock_t t;
    Matrix A, B, C;
    int a1, a2, b1, b2;
    int i, j;
    srand(time(NULL));
    if (argc < 4) {
        // Bug fix: previously execution fell through after the usage
        // message and dereferenced the missing argv[1..3], crashing.
        cout << "Usage: ./accuracy.o A.height A.width B.width" << endl;
        return 1;
    }
    // Get dimensions of A and B
    // Run $ ./matrixMul 1 1000000 400
    a1 = atoi(argv[1]); // A's height
    a2 = atoi(argv[2]); // A's width
    b1 = a2;            // B's height (must equal A's width)
    b2 = atoi(argv[3]); // B's width
    A.height = a1;
    A.width = A.stride = a2;
    A.elements = new float[A.width * A.height];
    B.height = b1;
    B.width = B.stride = b2;
    B.elements = new float[B.width * B.height];
    C.height = A.height;
    C.width = C.stride = B.width;
    C.elements = new float[C.width * C.height];
    // Fill A and B with random floats
    for (i = 0; i < A.height; ++i)
        for (j = 0; j < A.width; ++j)
            A.elements[i * A.width + j] = ((float)rand() / (float)RAND_MAX) * UP_BOUND;
    for (i = 0; i < B.height; ++i)
        for (j = 0; j < B.width; ++j)
            B.elements[i * B.width + j] = ((float)rand() / (float)RAND_MAX) * UP_BOUND;
    // Call MatMul(), and therefore MatMulKernel(); the timing covers the
    // full transfer + compute + readback round trip.
    t = clock();
    MatMul(A, B, C);
    // Print time multiplication took
    t = clock() - t;
    cout << "It took me " << fixed << ((float)t)/CLOCKS_PER_SEC;
    cout << " seconds.\n" << endl;
    // Print the upper-left corners (at most 10x10) of A, B, and C
    for (i = 0; i < min(10, A.height); ++i) {
        for (j = 0; j < min(10, A.width); ++j)
            cout << fixed << A.elements[i * A.width + j] << "\t";
        cout << endl;
    }
    cout << endl;
    for (i = 0; i < min(10, B.height); ++i) {
        for (j = 0; j < min(10, B.width); ++j)
            cout << fixed << B.elements[i * B.width + j] << "\t";
        cout << endl;
    }
    cout << endl;
    for (i = 0; i < min(10, C.height); ++i) {
        for (j = 0; j < min(10, C.width); ++j)
            cout << fixed << C.elements[i * C.width + j] << "\t";
        cout << endl;
    }
    cout << endl;
    delete[] A.elements;
    delete[] B.elements;
    delete[] C.elements;
    return 0;
}
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3676:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3676:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z10GetElement6Matrixii
.type _Z10GetElement6Matrixii, @function
_Z10GetElement6Matrixii:
.LFB3669:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE3669:
.size _Z10GetElement6Matrixii, .-_Z10GetElement6Matrixii
.globl _Z10SetElement6Matrixiif
.type _Z10SetElement6Matrixiif, @function
_Z10SetElement6Matrixiif:
.LFB3670:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE3670:
.size _Z10SetElement6Matrixiif, .-_Z10SetElement6Matrixiif
.globl _Z12GetSubMatrix6Matrixii
.type _Z12GetSubMatrix6Matrixii, @function
_Z12GetSubMatrix6Matrixii:
.LFB3671:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE3671:
.size _Z12GetSubMatrix6Matrixii, .-_Z12GetSubMatrix6Matrixii
.globl _Z41__device_stub__Z12MatMulKernel6MatrixS_S_R6MatrixS0_S0_
.type _Z41__device_stub__Z12MatMulKernel6MatrixS_S_R6MatrixS0_S0_, @function
_Z41__device_stub__Z12MatMulKernel6MatrixS_S_R6MatrixS0_S0_:
.LFB3698:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
movq %rdi, 64(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 80(%rsp)
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L13
.L9:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L14
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L13:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 120
pushq 8(%rsp)
.cfi_def_cfa_offset 128
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z12MatMulKernel6MatrixS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L9
.L14:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3698:
.size _Z41__device_stub__Z12MatMulKernel6MatrixS_S_R6MatrixS0_S0_, .-_Z41__device_stub__Z12MatMulKernel6MatrixS_S_R6MatrixS0_S0_
.globl _Z12MatMulKernel6MatrixS_S_
.type _Z12MatMulKernel6MatrixS_S_, @function
_Z12MatMulKernel6MatrixS_S_:
.LFB3699:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq 64(%rsp), %rdx
leaq 40(%rsp), %rsi
leaq 16(%rsp), %rdi
call _Z41__device_stub__Z12MatMulKernel6MatrixS_S_R6MatrixS0_S0_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3699:
.size _Z12MatMulKernel6MatrixS_S_, .-_Z12MatMulKernel6MatrixS_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "CUDA malloc A: "
.LC1:
.string "Copy A to device: "
.LC2:
.string "\n"
.LC3:
.string "CUDA malloc B: "
.LC4:
.string "Copy B to device: "
.LC5:
.string "CUDA malloc C: "
.LC6:
.string "Run kernel: "
.LC7:
.string "Copy C off of device: "
.text
.globl _Z6MatMul6MatrixS_S_
.type _Z6MatMul6MatrixS_S_, @function
_Z6MatMul6MatrixS_S_:
.LFB3672:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $248, %rsp
.cfi_def_cfa_offset 304
movq %fs:40, %rax
movq %rax, 232(%rsp)
xorl %eax, %eax
movl 304(%rsp), %eax
movl 308(%rsp), %r13d
movl 328(%rsp), %ebx
movl 332(%rsp), %r12d
movl 352(%rsp), %ebp
movl 356(%rsp), %r15d
movl %eax, 56(%rsp)
movl %eax, 48(%rsp)
movl %r13d, 52(%rsp)
imull %r13d, %eax
cltq
salq $2, %rax
movq %rax, 8(%rsp)
leaq 64(%rsp), %rdi
movq %rax, %rsi
call cudaMalloc@PLT
movl %eax, %r14d
movl $15, %edx
leaq .LC0(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl %r14d, %edi
call cudaGetErrorString@PLT
testq %rax, %rax
je .L63
movq %rax, %r14
movq %rax, %rdi
call strlen@PLT
movq %rax, %rdx
movq %r14, %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
.L19:
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
leaq _ZSt4cout(%rip), %rdx
movq 240(%rdx,%rax), %r14
testq %r14, %r14
je .L64
cmpb $0, 56(%r14)
je .L22
movzbl 67(%r14), %esi
.L23:
movsbl %sil, %esi
leaq _ZSt4cout(%rip), %r14
movq %r14, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
movl $1, %ecx
movq 8(%rsp), %rdx
movq 320(%rsp), %rsi
movq 64(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, 8(%rsp)
movl $18, %edx
leaq .LC1(%rip), %rsi
movq %r14, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl 8(%rsp), %edi
call cudaGetErrorString@PLT
movq %rax, %r14
testq %rax, %rax
je .L65
movq %rax, %rdi
call strlen@PLT
movq %rax, %rdx
movq %r14, %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
.L25:
movl $1, %edx
leaq .LC2(%rip), %rsi
leaq _ZSt4cout(%rip), %r14
movq %r14, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq 240(%r14,%rax), %r14
testq %r14, %r14
je .L66
cmpb $0, 56(%r14)
je .L28
movzbl 67(%r14), %esi
.L29:
movsbl %sil, %esi
leaq _ZSt4cout(%rip), %r14
movq %r14, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
movl %ebx, 88(%rsp)
movl %ebx, 80(%rsp)
movl %r12d, 84(%rsp)
imull %ebx, %r12d
movslq %r12d, %r12
salq $2, %r12
leaq 96(%rsp), %rdi
movq %r12, %rsi
call cudaMalloc@PLT
movl %eax, 8(%rsp)
movl $15, %edx
leaq .LC3(%rip), %rsi
movq %r14, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl 8(%rsp), %edi
call cudaGetErrorString@PLT
movq %rax, %r14
testq %rax, %rax
je .L67
movq %rax, %rdi
call strlen@PLT
movq %rax, %rdx
movq %r14, %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
.L31:
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
leaq _ZSt4cout(%rip), %rdx
movq 240(%rdx,%rax), %r14
testq %r14, %r14
je .L68
cmpb $0, 56(%r14)
je .L34
movzbl 67(%r14), %esi
.L35:
movsbl %sil, %esi
leaq _ZSt4cout(%rip), %r14
movq %r14, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
movl $1, %ecx
movq %r12, %rdx
movq 344(%rsp), %rsi
movq 96(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %r12d
movl $18, %edx
leaq .LC4(%rip), %rsi
movq %r14, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl %r12d, %edi
call cudaGetErrorString@PLT
movq %rax, %r12
testq %rax, %rax
je .L69
movq %rax, %rdi
call strlen@PLT
movq %rax, %rdx
movq %r12, %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
.L37:
movl $1, %edx
leaq .LC2(%rip), %rsi
leaq _ZSt4cout(%rip), %r12
movq %r12, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq 240(%r12,%rax), %r12
testq %r12, %r12
je .L70
cmpb $0, 56(%r12)
je .L40
movzbl 67(%r12), %esi
.L41:
movsbl %sil, %esi
leaq _ZSt4cout(%rip), %r12
movq %r12, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
movl %ebp, 120(%rsp)
movl %ebp, 112(%rsp)
movl %r15d, 116(%rsp)
imull %r15d, %ebp
movslq %ebp, %rbp
salq $2, %rbp
leaq 128(%rsp), %rdi
movq %rbp, %rsi
call cudaMalloc@PLT
movl %eax, %r14d
movl $15, %edx
leaq .LC5(%rip), %rsi
movq %r12, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl %r14d, %edi
call cudaGetErrorString@PLT
movq %rax, %r12
testq %rax, %rax
je .L71
movq %rax, %rdi
call strlen@PLT
movq %rax, %rdx
movq %r12, %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
.L43:
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
leaq _ZSt4cout(%rip), %rdx
movq 240(%rdx,%rax), %r12
testq %r12, %r12
je .L72
cmpb $0, 56(%r12)
je .L46
movzbl 67(%r12), %esi
.L47:
movsbl %sil, %esi
leaq _ZSt4cout(%rip), %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
movl $1, 32(%rsp)
movl %ebx, %ebx
movl $3435973837, %eax
imulq %rax, %rbx
shrq $36, %rbx
movl %ebx, 36(%rsp)
movl %r13d, %r13d
imulq %rax, %r13
shrq $36, %r13
movl %r13d, 40(%rsp)
movl $1, 44(%rsp)
movl $20, 24(%rsp)
movl $20, 28(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 24(%rsp), %rdx
movl $1, %ecx
movq 36(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L73
.L48:
call cudaThreadSynchronize@PLT
movl %eax, %ebx
movl $12, %edx
leaq .LC6(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl %ebx, %edi
call cudaGetErrorString@PLT
movq %rax, %rbx
testq %rax, %rax
je .L74
movq %rax, %rdi
call strlen@PLT
movq %rax, %rdx
movq %rbx, %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
.L50:
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
leaq _ZSt4cout(%rip), %rdx
movq 240(%rdx,%rax), %rbx
testq %rbx, %rbx
je .L75
cmpb $0, 56(%rbx)
je .L53
movzbl 67(%rbx), %esi
.L54:
movsbl %sil, %esi
leaq _ZSt4cout(%rip), %r12
movq %r12, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
movl $2, %ecx
movq %rbp, %rdx
movq 128(%rsp), %rsi
movq 368(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %ebx
movl $22, %edx
leaq .LC7(%rip), %rsi
movq %r12, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl %ebx, %edi
call cudaGetErrorString@PLT
movq %rax, %rbx
testq %rax, %rax
je .L76
movq %rax, %rdi
call strlen@PLT
movq %rax, %rdx
movq %rbx, %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
.L56:
movl $1, %edx
leaq .LC2(%rip), %rsi
leaq _ZSt4cout(%rip), %rbx
movq %rbx, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq 240(%rbx,%rax), %rbx
testq %rbx, %rbx
je .L77
cmpb $0, 56(%rbx)
je .L59
movzbl 67(%rbx), %esi
.L60:
movsbl %sil, %esi
leaq _ZSt4cout(%rip), %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
movq 64(%rsp), %rdi
call cudaFree@PLT
movq 96(%rsp), %rdi
call cudaFree@PLT
movq 128(%rsp), %rdi
call cudaFree@PLT
movq 232(%rsp), %rax
subq %fs:40, %rax
jne .L78
addq $248, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L63:
.cfi_restore_state
leaq _ZSt4cout(%rip), %rdi
movq _ZSt4cout(%rip), %rax
addq -24(%rax), %rdi
movl 32(%rdi), %esi
orl $1, %esi
call _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate@PLT
jmp .L19
.L64:
movq 232(%rsp), %rax
subq %fs:40, %rax
jne .L79
call _ZSt16__throw_bad_castv@PLT
.L79:
call __stack_chk_fail@PLT
.L22:
movq %r14, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%r14), %rax
movl $10, %esi
movq %r14, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L23
.L65:
leaq _ZSt4cout(%rip), %rdi
movq _ZSt4cout(%rip), %rax
addq -24(%rax), %rdi
movl 32(%rdi), %esi
orl $1, %esi
call _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate@PLT
jmp .L25
.L66:
movq 232(%rsp), %rax
subq %fs:40, %rax
jne .L80
call _ZSt16__throw_bad_castv@PLT
.L80:
call __stack_chk_fail@PLT
.L28:
movq %r14, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%r14), %rax
movl $10, %esi
movq %r14, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L29
.L67:
leaq _ZSt4cout(%rip), %rdi
movq _ZSt4cout(%rip), %rax
addq -24(%rax), %rdi
movl 32(%rdi), %esi
orl $1, %esi
call _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate@PLT
jmp .L31
.L68:
movq 232(%rsp), %rax
subq %fs:40, %rax
jne .L81
call _ZSt16__throw_bad_castv@PLT
.L81:
call __stack_chk_fail@PLT
.L34:
movq %r14, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%r14), %rax
movl $10, %esi
movq %r14, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L35
.L69:
movq %r14, %rdi
movq _ZSt4cout(%rip), %rax
addq -24(%rax), %rdi
movl 32(%rdi), %esi
orl $1, %esi
call _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate@PLT
jmp .L37
.L70:
movq 232(%rsp), %rax
subq %fs:40, %rax
jne .L82
call _ZSt16__throw_bad_castv@PLT
.L82:
call __stack_chk_fail@PLT
.L40:
movq %r12, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%r12), %rax
movl $10, %esi
movq %r12, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L41
.L71:
leaq _ZSt4cout(%rip), %rdi
movq _ZSt4cout(%rip), %rax
addq -24(%rax), %rdi
movl 32(%rdi), %esi
orl $1, %esi
call _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate@PLT
jmp .L43
.L72:
movq 232(%rsp), %rax
subq %fs:40, %rax
jne .L83
call _ZSt16__throw_bad_castv@PLT
.L83:
call __stack_chk_fail@PLT
.L46:
movq %r12, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%r12), %rax
movl $10, %esi
movq %r12, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L47
.L73:
movdqa 48(%rsp), %xmm0
movaps %xmm0, 144(%rsp)
movq 64(%rsp), %rax
movq %rax, 160(%rsp)
movdqa 80(%rsp), %xmm1
movaps %xmm1, 176(%rsp)
movq 96(%rsp), %rax
movq %rax, 192(%rsp)
movdqa 112(%rsp), %xmm2
movaps %xmm2, 208(%rsp)
movq 128(%rsp), %rax
movq %rax, 224(%rsp)
leaq 208(%rsp), %rdx
leaq 176(%rsp), %rsi
leaq 144(%rsp), %rdi
call _Z41__device_stub__Z12MatMulKernel6MatrixS_S_R6MatrixS0_S0_
jmp .L48
.L74:
leaq _ZSt4cout(%rip), %rdi
movq _ZSt4cout(%rip), %rax
addq -24(%rax), %rdi
movl 32(%rdi), %esi
orl $1, %esi
call _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate@PLT
jmp .L50
.L75:
movq 232(%rsp), %rax
subq %fs:40, %rax
jne .L84
call _ZSt16__throw_bad_castv@PLT
.L84:
call __stack_chk_fail@PLT
.L53:
movq %rbx, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%rbx), %rax
movl $10, %esi
movq %rbx, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L54
.L76:
movq %r12, %rdi
movq _ZSt4cout(%rip), %rax
addq -24(%rax), %rdi
movl 32(%rdi), %esi
orl $1, %esi
call _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate@PLT
jmp .L56
.L77:
movq 232(%rsp), %rax
subq %fs:40, %rax
jne .L85
call _ZSt16__throw_bad_castv@PLT
.L85:
call __stack_chk_fail@PLT
.L59:
movq %rbx, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%rbx), %rax
movl $10, %esi
movq %rbx, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L60
.L78:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3672:
.size _Z6MatMul6MatrixS_S_, .-_Z6MatMul6MatrixS_S_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC8:
.string "Usage: ./accuracy.o A.height A.width B.width"
.section .rodata.str1.1
.LC11:
.string "It took me "
.LC13:
.string " seconds.\n"
.LC14:
.string "\t"
.text
.globl main
.type main, @function
main:
.LFB3673:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $184, %rsp
.cfi_def_cfa_offset 240
movl %edi, %ebp
movq %rsi, %r12
movl $0, %edi
call time@PLT
movl %eax, %edi
call srand@PLT
cmpl $3, %ebp
jle .L145
.L87:
movq 8(%r12), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %rbp
movq %rax, 64(%rsp)
movl %eax, 48(%rsp)
movq 16(%r12), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %r15
movq %rax, 72(%rsp)
movl %eax, %ebx
movq 24(%r12), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %r14
movq %rax, 56(%rsp)
movl %eax, 4(%rsp)
movl %ebp, %eax
imull %r15d, %eax
cltq
movabsq $2305843009213693950, %rdx
cmpq %rax, %rdx
jb .L88
leaq 0(,%rax,4), %rdi
call _Znam@PLT
movq %rax, 8(%rsp)
movl %r15d, %eax
imull %r14d, %eax
cltq
movabsq $2305843009213693950, %rdx
cmpq %rax, %rdx
jb .L146
leaq 0(,%rax,4), %rdi
call _Znam@PLT
movq %rax, 16(%rsp)
movl 64(%rsp), %eax
movl 56(%rsp), %ecx
imull %ecx, %eax
cltq
movabsq $2305843009213693950, %rdx
cmpq %rax, %rdx
jb .L147
leaq 0(,%rax,4), %rdi
call _Znam@PLT
movq %rax, 32(%rsp)
cmpl $0, 64(%rsp)
jle .L93
movq 72(%rsp), %rax
movl %eax, %r13d
movl $0, %r12d
movl $0, %r15d
subl $1, %eax
movl %eax, 24(%rsp)
movq 8(%rsp), %rax
addq $4, %rax
movq %rax, 40(%rsp)
jmp .L94
.L145:
leaq .LC8(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
jmp .L87
.L88:
call __cxa_throw_bad_array_new_length@PLT
.L146:
call __cxa_throw_bad_array_new_length@PLT
.L147:
call __cxa_throw_bad_array_new_length@PLT
.L96:
movslq %r15d, %rsi
movq 8(%rsp), %rax
leaq (%rax,%rsi,4), %rbp
movl 24(%rsp), %eax
addq %rsi, %rax
movq 40(%rsp), %rcx
leaq (%rcx,%rax,4), %r14
.L95:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss .LC9(%rip), %xmm0
mulss .LC10(%rip), %xmm0
movss %xmm0, 0(%rbp)
addq $4, %rbp
cmpq %r14, %rbp
jne .L95
.L97:
addl $1, %r12d
addl %r13d, %r15d
cmpl %r12d, 48(%rsp)
je .L93
.L94:
testl %ebx, %ebx
jg .L96
jmp .L97
.L93:
cmpl $0, 72(%rsp)
jle .L98
movq 56(%rsp), %rax
movl %eax, %r12d
movl $0, %r14d
movl $0, %r15d
subl $1, %eax
movl %eax, 48(%rsp)
movq 16(%rsp), %rax
addq $4, %rax
movq %rax, 24(%rsp)
movl %r12d, %eax
movl %r14d, %r12d
movl %eax, %r14d
jmp .L99
.L101:
movslq %r12d, %rsi
movq 16(%rsp), %rax
leaq (%rax,%rsi,4), %rbp
movl 48(%rsp), %eax
addq %rsi, %rax
movq 24(%rsp), %rcx
leaq (%rcx,%rax,4), %r13
.L100:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss .LC9(%rip), %xmm0
mulss .LC10(%rip), %xmm0
movss %xmm0, 0(%rbp)
addq $4, %rbp
cmpq %r13, %rbp
jne .L100
.L102:
addl $1, %r15d
addl %r14d, %r12d
cmpl %r15d, %ebx
je .L98
.L99:
cmpl $0, 4(%rsp)
jg .L101
jmp .L102
.L98:
call clock@PLT
movq %rax, %rbp
movq 72(%rsp), %r14
movl %r14d, 80(%rsp)
movq 64(%rsp), %r15
movl %r15d, 84(%rsp)
movl %r14d, 88(%rsp)
movq 8(%rsp), %rcx
movq %rcx, 96(%rsp)
movq 56(%rsp), %rax
movl %eax, 112(%rsp)
movl %r14d, 116(%rsp)
movl %eax, 120(%rsp)
movq 16(%rsp), %rsi
movq %rsi, 128(%rsp)
movl %eax, 144(%rsp)
movl %r15d, 148(%rsp)
movl %eax, 152(%rsp)
movq 32(%rsp), %rax
movq %rax, 160(%rsp)
subq $80, %rsp
.cfi_def_cfa_offset 320
movdqa 224(%rsp), %xmm1
movups %xmm1, 48(%rsp)
movq %rax, 64(%rsp)
movdqa 192(%rsp), %xmm2
movups %xmm2, 24(%rsp)
movq %rsi, 40(%rsp)
movdqa 160(%rsp), %xmm3
movups %xmm3, (%rsp)
movq %rcx, 16(%rsp)
call _Z6MatMul6MatrixS_S_
addq $80, %rsp
.cfi_def_cfa_offset 240
call clock@PLT
subq %rbp, %rax
movq %rax, %r12
leaq .LC11(%rip), %rsi
leaq _ZSt4cout(%rip), %rbp
movq %rbp, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movq (%rax), %rax
movq %rdi, %rdx
addq -24(%rax), %rdx
movl 24(%rdx), %eax
andl $-261, %eax
orl $4, %eax
movl %eax, 24(%rdx)
pxor %xmm0, %xmm0
cvtsi2ssq %r12, %xmm0
divss .LC12(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
leaq .LC13(%rip), %rsi
movq %rbp, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $10, %eax
cmpl %eax, %r15d
cmovle %r15d, %eax
movl %eax, 48(%rsp)
testl %r15d, %r15d
jle .L103
movl %r14d, 40(%rsp)
movl $0, 24(%rsp)
movl $0, %r15d
movl $10, %r13d
cmpl %r13d, %ebx
cmovle %ebx, %r13d
leaq .LC14(%rip), %r14
movl %ebx, 52(%rsp)
jmp .L104
.L148:
call _ZSt16__throw_bad_castv@PLT
.L107:
movq %rbx, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%rbx), %rax
movl $10, %esi
movq %rbx, %rdi
call *48(%rax)
movl %eax, %esi
.L108:
movsbl %sil, %esi
movq %rbp, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
addl $1, %r15d
movl 40(%rsp), %ecx
addl %ecx, 24(%rsp)
movl 48(%rsp), %eax
cmpl %eax, %r15d
jge .L109
.L104:
cmpl $0, 52(%rsp)
jle .L111
movslq 24(%rsp), %rax
movq 8(%rsp), %rcx
leaq (%rcx,%rax,4), %r12
movl $0, %ebx
.L105:
movq 0(%rbp), %rax
movq %rbp, %rdx
addq -24(%rax), %rdx
movl 24(%rdx), %eax
andl $-261, %eax
orl $4, %eax
movl %eax, 24(%rdx)
pxor %xmm0, %xmm0
cvtss2sd (%r12,%rbx,4), %xmm0
movq %rbp, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
movl $1, %edx
movq %r14, %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
addq $1, %rbx
cmpl %ebx, %r13d
jg .L105
.L111:
movq 0(%rbp), %rax
movq -24(%rax), %rax
movq 240(%rbp,%rax), %rbx
testq %rbx, %rbx
je .L148
cmpb $0, 56(%rbx)
je .L107
movzbl 67(%rbx), %esi
jmp .L108
.L149:
call _ZSt16__throw_bad_castv@PLT
.L115:
movq %rbx, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%rbx), %rax
movl $10, %esi
movq %rbx, %rdi
call *48(%rax)
movl %eax, %esi
.L116:
movsbl %sil, %esi
movq %rbp, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
addl $1, %r15d
movl 40(%rsp), %ecx
addl %ecx, 24(%rsp)
movl 52(%rsp), %eax
cmpl %eax, %r15d
jge .L117
.L112:
cmpl $0, 4(%rsp)
jle .L119
movslq 24(%rsp), %rax
movq 16(%rsp), %rcx
leaq (%rcx,%rax,4), %r12
movl $0, %ebx
.L113:
movq 0(%rbp), %rax
movq %rbp, %rdx
addq -24(%rax), %rdx
movl 24(%rdx), %eax
andl $-261, %eax
orl $4, %eax
movl %eax, 24(%rdx)
pxor %xmm0, %xmm0
cvtss2sd (%r12,%rbx,4), %xmm0
movq %rbp, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
movl $1, %edx
movq %r14, %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
addq $1, %rbx
cmpl %ebx, %r13d
jg .L113
.L119:
movq 0(%rbp), %rax
movq -24(%rax), %rax
movq 240(%rbp,%rax), %rbx
testq %rbx, %rbx
je .L149
cmpb $0, 56(%rbx)
je .L115
movzbl 67(%rbx), %esi
jmp .L116
.L117:
leaq _ZSt4cout(%rip), %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
cmpl $0, 64(%rsp)
jle .L120
.L128:
movl 56(%rsp), %eax
movl %eax, 40(%rsp)
movl $0, 24(%rsp)
movl $0, %r15d
movl $10, %r13d
movl 4(%rsp), %eax
cmpl %r13d, %eax
cmovle %eax, %r13d
leaq _ZSt4cout(%rip), %rbp
leaq .LC14(%rip), %r14
jmp .L121
.L150:
call _ZSt16__throw_bad_castv@PLT
.L124:
movq %rbx, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%rbx), %rax
movl $10, %esi
movq %rbx, %rdi
call *48(%rax)
movl %eax, %esi
.L125:
movsbl %sil, %esi
movq %rbp, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
addl $1, %r15d
movl 40(%rsp), %ecx
addl %ecx, 24(%rsp)
movl 48(%rsp), %eax
cmpl %eax, %r15d
jge .L120
.L121:
cmpl $0, 4(%rsp)
jle .L127
movslq 24(%rsp), %rax
movq 32(%rsp), %rcx
leaq (%rcx,%rax,4), %r12
movl $0, %ebx
.L122:
movq 0(%rbp), %rax
movq %rbp, %rdx
addq -24(%rax), %rdx
movl 24(%rdx), %eax
andl $-261, %eax
orl $4, %eax
movl %eax, 24(%rdx)
pxor %xmm0, %xmm0
cvtss2sd (%r12,%rbx,4), %xmm0
movq %rbp, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
movl $1, %edx
movq %r14, %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
addq $1, %rbx
cmpl %ebx, %r13d
jg .L122
.L127:
movq 0(%rbp), %rax
movq -24(%rax), %rax
movq 240(%rbp,%rax), %rbx
testq %rbx, %rbx
je .L150
cmpb $0, 56(%rbx)
je .L124
movzbl 67(%rbx), %esi
jmp .L125
.L151:
leaq _ZSt4cout(%rip), %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
jmp .L128
.L109:
leaq _ZSt4cout(%rip), %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $10, %eax
movq 72(%rsp), %rsi
cmpl %eax, %esi
cmovle %esi, %eax
movl %eax, %ecx
testl %esi, %esi
jle .L151
.L129:
movl 56(%rsp), %eax
movl %eax, 40(%rsp)
movl $0, 24(%rsp)
movl $0, %r15d
movl $10, %r13d
movl 4(%rsp), %eax
cmpl %r13d, %eax
cmovle %eax, %r13d
leaq _ZSt4cout(%rip), %rbp
leaq .LC14(%rip), %r14
movl %ecx, 52(%rsp)
jmp .L112
.L103:
leaq _ZSt4cout(%rip), %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $10, %eax
movq 72(%rsp), %rsi
cmpl %eax, %esi
cmovle %esi, %eax
movl %eax, %ecx
testl %esi, %esi
jg .L129
leaq _ZSt4cout(%rip), %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
.L120:
leaq _ZSt4cout(%rip), %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq 8(%rsp), %rdi
call _ZdaPv@PLT
movq 16(%rsp), %rdi
call _ZdaPv@PLT
movq 32(%rsp), %rdi
call _ZdaPv@PLT
movl $0, %eax
addq $184, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3673:
.size main, .-main
.section .rodata.str1.1
.LC15:
.string "_Z12MatMulKernel6MatrixS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3701:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC15(%rip), %rdx
movq %rdx, %rcx
leaq _Z12MatMulKernel6MatrixS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3701:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC9:
.long 805306368
.align 4
.LC10:
.long 1120403456
.align 4
.LC12:
.long 1232348160
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /*
Reference: http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.
html#ixzz4CtH09yed
*/
#include <cstdlib>
#include <ctime>
#include <cstdio>
#include <iostream>
using namespace std;
// Generate random floats between 0 and UP_BOUND
// Bug fix: the macro previously ended in ';' — the semicolon was pasted
// into every expansion, which breaks any use inside an expression.
#define UP_BOUND 100
// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.stride + col)
typedef struct {
    int width;       // number of columns
    int height;      // number of rows
    int stride;      // row pitch in elements
    float* elements; // width*height floats (host or device buffer)
} Matrix;
// Thread block size
#define BLOCK_SIZE 20
// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
// Read element (row, col) of A, honoring A's row stride.
__device__ float GetElement(const Matrix A, int row, int col)
{
    const float* src = A.elements + row * A.stride + col;
    return *src;
}
// Set a matrix element
// Store `value` into A at (row, col); A.stride is the row pitch in elements.
__device__ void SetElement(Matrix A, int row, int col,
                           float value)
{
    const int idx = row * A.stride + col;
    A.elements[idx] = value;
}
// Get the BLOCK_SIZExBLOCK_SIZE sub-matrix Asub of A that is
// located col sub-matrices to the right and row sub-matrices down
// from the upper-left corner of A
// Build a view of the BLOCK_SIZE x BLOCK_SIZE sub-matrix of A located
// `row` tile-rows down and `col` tile-columns right of A's upper-left
// corner. No data is copied: the view aliases A's storage and stride.
__device__ Matrix GetSubMatrix(Matrix A, int row, int col)
{
    Matrix sub;
    sub.width  = BLOCK_SIZE;
    sub.height = BLOCK_SIZE;
    sub.stride = A.stride;
    // Linear offset of the tile's first element inside A's storage.
    const int origin = BLOCK_SIZE * (A.stride * row + col);
    sub.elements = A.elements + origin;
    return sub;
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
// Matrix multiplication - Host code.
// Copies A and B to device memory, launches MatMulKernel, and copies the
// result back into C.elements. The kernel bounds-checks every access, so
// dimensions need NOT be multiples of BLOCK_SIZE.
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
    // Load A to device memory.
    Matrix d_A;
    d_A.width = d_A.stride = A.width;
    d_A.height = A.height;
    // size_t arithmetic avoids 32-bit int overflow for large matrices.
    size_t size = (size_t)A.width * A.height * sizeof(float);
    cudaError_t err = cudaMalloc(&d_A.elements, size);
    cout << "CUDA malloc A: " << cudaGetErrorString(err) << endl;
    err = cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
    cout << "Copy A to device: " << cudaGetErrorString(err) << "\n" << endl;
    // Load B to device memory.
    Matrix d_B;
    d_B.width = d_B.stride = B.width;
    d_B.height = B.height;
    size = (size_t)B.width * B.height * sizeof(float);
    err = cudaMalloc(&d_B.elements, size);
    cout << "CUDA malloc B: " << cudaGetErrorString(err) << endl;
    err = cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);
    cout << "Copy B to device: " << cudaGetErrorString(err) << "\n" << endl;
    // Allocate C in device memory.
    Matrix d_C;
    d_C.width = d_C.stride = C.width;
    d_C.height = C.height;
    size = (size_t)C.width * C.height * sizeof(float);
    err = cudaMalloc(&d_C.elements, size);
    cout << "CUDA malloc C: " << cudaGetErrorString(err) << endl;
    // Invoke kernel. Ceiling division covers partial tiles at the right and
    // bottom edges (the kernel zero-pads them); plain division silently
    // dropped the tail whenever a dimension was not a multiple of BLOCK_SIZE.
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid((B.width + dimBlock.x - 1) / dimBlock.x,
                 (A.height + dimBlock.y - 1) / dimBlock.y);
    MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
    // Launch-configuration errors surface via cudaGetLastError(); execution
    // errors surface at the sync. cudaThreadSynchronize() is deprecated in
    // favor of cudaDeviceSynchronize().
    err = cudaGetLastError();
    if (err == cudaSuccess)
        err = cudaDeviceSynchronize();
    cout << "Run kernel: " << cudaGetErrorString(err) << endl;
    // Read C from device memory.
    err = cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
    cout << "Copy C off of device: " << cudaGetErrorString(err) << "\n" << endl;
    // Free device memory.
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
}
// Matrix multiplication kernel called by MatMul()
// Matrix multiplication kernel called by MatMul().
// One thread computes one element of C. Tiles of A and B are staged in
// shared memory; every global load is bounds-checked and out-of-range tile
// slots are zero-filled, so dimensions need not be multiples of BLOCK_SIZE.
// Expects a 2D launch with blockDim == (BLOCK_SIZE, BLOCK_SIZE).
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
    // Shared memory for the current tiles of A and B.
    __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
    int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    // Each thread accumulates one element of C in Cvalue.
    // 0.0f literals keep the arithmetic in single precision (0.0 is double).
    float Cvalue = 0.0f;
    // ceil(A.width / BLOCK_SIZE) tile steps.
    for (int i = 0; i < (A.width - 1)/BLOCK_SIZE + 1; ++i) {
        // Stage one element of A's tile; zero-pad outside the matrix.
        int temp = i * BLOCK_SIZE + threadIdx.x;
        if (row < A.height && temp < A.width)
            As[threadIdx.y][threadIdx.x] = A.elements[row * A.width + temp];
        else
            As[threadIdx.y][threadIdx.x] = 0.0f;
        // Stage one element of B's tile; zero-pad outside the matrix.
        temp = i * BLOCK_SIZE + threadIdx.y;
        if (col < B.width && temp < B.height)
            Bs[threadIdx.y][threadIdx.x] = B.elements[temp * B.width + col];
        else
            Bs[threadIdx.y][threadIdx.x] = 0.0f;
        // Barrier outside any divergent branch: tiles fully written before reads.
        __syncthreads();
        for (int j = 0; j < BLOCK_SIZE; ++j)
            Cvalue += As[threadIdx.y][j] * Bs[j][threadIdx.x];
        // Keep fast threads from overwriting tiles still being read.
        __syncthreads();
    }
    if (row < C.height && col < C.width)
        C.elements[row * C.width + col] = Cvalue;
    /*---Original code from CUDA C Programming Guide---*/
    /*
    // Block row and column
    int blockRow = blockIdx.y;
    int blockCol = blockIdx.x;
    // Each thread block computes one sub-matrix Csub of C
    Matrix Csub = GetSubMatrix(C, blockRow, blockCol);
    // Each thread computes one element of Csub
    // by accumulating results into Cvalue
    float Cvalue = 0;
    // Thread row and column within Csub
    int row = threadIdx.y;
    int col = threadIdx.x;
    // Loop over all the sub-matrices of A and B that are
    // required to compute Csub
    // Multiply each pair of sub-matrices together
    // and accumulate the results
    for (int m = 0; m < (A.width / BLOCK_SIZE); ++m) {
        // Get sub-matrix Asub of A
        Matrix Asub = GetSubMatrix(A, blockRow, m);
        // Get sub-matrix Bsub of B
        Matrix Bsub = GetSubMatrix(B, m, blockCol);
        // Shared memory used to store Asub and Bsub respectively
        __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
        __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
        // Load Asub and Bsub from device memory to shared memory
        // Each thread loads one element of each sub-matrix
        As[row][col] = GetElement(Asub, row, col);
        Bs[row][col] = GetElement(Bsub, row, col);
        // Synchronize to make sure the sub-matrices are loaded
        // before starting the computation
        __syncthreads();
        // Multiply Asub and Bsub together
        for (int e = 0; e < BLOCK_SIZE; ++e)
            Cvalue += As[row][e] * Bs[e][col];
        // Synchronize to make sure that the preceding
        // computation is done before loading two new
        // sub-matrices of A and B in the next iteration
        __syncthreads();
    }
    // Write Csub to device memory
    // Each thread writes one element
    SetElement(Csub, row, col, Cvalue);
    */
}
// Entry point: reads A.height, A.width, B.width from argv, fills A and B
// with random floats in [0, UP_BOUND), multiplies them via MatMul(), and
// prints the elapsed time plus the top-left 10x10 corner of each matrix.
int main(int argc, char const *argv[])
{
    clock_t t;
    Matrix A, B, C;
    int a1, a2, b1, b2;
    int i, j;
    srand(time(NULL));
    // Bail out on missing arguments; the original fell through and
    // dereferenced argv[1..3], which is undefined behavior.
    if (argc < 4) {
        cout << "Usage: ./accuracy.o A.height A.width B.width" << endl;
        return 1;
    }
    // Get dimensions of A and B
    // Run $ ./matrixMul 1 1000000 400
    a1 = atoi(argv[1]); // A's height
    a2 = atoi(argv[2]); // A's width
    b1 = a2;            // B's height must equal A's width
    b2 = atoi(argv[3]); // B's width
    A.height = a1;
    A.width = A.stride = a2;
    A.elements = new float[A.width * A.height];
    B.height = b1;
    B.width = B.stride = b2;
    B.elements = new float[B.width * B.height];
    C.height = A.height;
    C.width = C.stride = B.width;
    C.elements = new float[C.width * C.height];
    // Fill A and B with random floats
    for (i = 0; i < A.height; ++i)
        for (j = 0; j < A.width; ++j)
            A.elements[i * A.width + j] = ((float)rand() / (float)RAND_MAX) * UP_BOUND;
    for (i = 0; i < B.height; ++i)
        for (j = 0; j < B.width; ++j)
            B.elements[i * B.width + j] = ((float)rand() / (float)RAND_MAX) * UP_BOUND;
    // Call MatMul(), and therefore MatMulKernel()
    t = clock();
    MatMul(A, B, C);
    // Print time multiplication took (CPU clock, not wall time)
    t = clock() - t;
    cout << "It took me " << fixed << ((float)t)/CLOCKS_PER_SEC;
    cout << " seconds.\n" << endl;
    // Print the top-left corner (at most 10x10) of A, B, and C
    for (i = 0; i < min(10, A.height); ++i) {
        for (j = 0; j < min(10, A.width); ++j)
            cout << fixed << A.elements[i * A.width + j] << "\t";
        cout << endl;
    }
    cout << endl;
    for (i = 0; i < min(10, B.height); ++i) {
        for (j = 0; j < min(10, B.width); ++j)
            cout << fixed << B.elements[i * B.width + j] << "\t";
        cout << endl;
    }
    cout << endl;
    for (i = 0; i < min(10, C.height); ++i) {
        for (j = 0; j < min(10, C.width); ++j)
            cout << fixed << C.elements[i * C.width + j] << "\t";
        cout << endl;
    }
    cout << endl;
    delete[] A.elements;
    delete[] B.elements;
    delete[] C.elements;
    return 0;
}
Reference: http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.
html#ixzz4CtH09yed
*/
#include <hip/hip_runtime.h>
#include <cstdlib>
#include <ctime>
#include <cstdio>
#include <iostream>
using namespace std;
// Generate random floats between 0 and UP_BOUND
#define UP_BOUND 100;
// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.stride + col)
typedef struct {
int width;
int height;
int stride;
float* elements;
} Matrix;
// Thread block size
#define BLOCK_SIZE 20
// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
// Get a matrix element
// Read the element of A at (row, col); A.stride is the row pitch in elements.
__device__ float GetElement(const Matrix A, int row, int col)
{
    const int idx = row * A.stride + col;
    return A.elements[idx];
}
// Set a matrix element
// Store `value` into A at (row, col); A.stride is the row pitch in elements.
__device__ void SetElement(Matrix A, int row, int col,
                           float value)
{
    const int idx = row * A.stride + col;
    A.elements[idx] = value;
}
// Get the BLOCK_SIZExBLOCK_SIZE sub-matrix Asub of A that is
// located col sub-matrices to the right and row sub-matrices down
// from the upper-left corner of A
// Build a view of the BLOCK_SIZE x BLOCK_SIZE sub-matrix of A located
// `row` tile-rows down and `col` tile-columns right of A's upper-left
// corner. No data is copied: the view aliases A's storage and stride.
__device__ Matrix GetSubMatrix(Matrix A, int row, int col)
{
    Matrix sub;
    sub.width  = BLOCK_SIZE;
    sub.height = BLOCK_SIZE;
    sub.stride = A.stride;
    // Linear offset of the tile's first element inside A's storage.
    const int origin = BLOCK_SIZE * (A.stride * row + col);
    sub.elements = A.elements + origin;
    return sub;
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
// Load A and B to device memory
Matrix d_A;
d_A.width = d_A.stride = A.width;
d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
hipError_t err = hipMalloc(&d_A.elements, size);
cout << "CUDA malloc A: " << hipGetErrorString(err) << endl;
err = hipMemcpy(d_A.elements, A.elements, size, hipMemcpyHostToDevice);
cout << "Copy A to device: " << hipGetErrorString(err) << "\n" << endl;
Matrix d_B;
d_B.width = d_B.stride = B.width;
d_B.height = B.height;
size = B.width * B.height * sizeof(float);
err = hipMalloc(&d_B.elements, size);
cout << "CUDA malloc B: " << hipGetErrorString(err) << endl;
err = hipMemcpy(d_B.elements, B.elements, size, hipMemcpyHostToDevice);
cout << "Copy B to device: " << hipGetErrorString(err) << "\n" << endl;
// Allocate C in device memory
Matrix d_C;
d_C.width = d_C.stride = C.width;
d_C.height = C.height;
size = C.width * C.height * sizeof(float);
err = hipMalloc(&d_C.elements, size);
cout << "CUDA malloc C: " << hipGetErrorString(err) << endl;
// Invoke kernel
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
err = hipDeviceSynchronize();
cout << "Run kernel: " << hipGetErrorString(err) << endl;
// Read C from device memory
err = hipMemcpy(C.elements, d_C.elements, size, hipMemcpyDeviceToHost);
cout << "Copy C off of device: " << hipGetErrorString(err) << "\n" << endl;
// Free device memory
hipFree(d_A.elements);
hipFree(d_B.elements);
hipFree(d_C.elements);
}
// Matrix multiplication kernel called by MatMul()
// Matrix multiplication kernel called by MatMul().
// One thread computes one element of C. Tiles of A and B are staged in
// shared memory; every global load is bounds-checked and out-of-range tile
// slots are zero-filled, so dimensions need not be multiples of BLOCK_SIZE.
// Expects a 2D launch with blockDim == (BLOCK_SIZE, BLOCK_SIZE).
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
    // Shared memory for the current tiles of A and B.
    __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
    int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    // Each thread accumulates one element of C in Cvalue.
    // 0.0f literals keep the arithmetic in single precision (0.0 is double).
    float Cvalue = 0.0f;
    // ceil(A.width / BLOCK_SIZE) tile steps.
    for (int i = 0; i < (A.width - 1)/BLOCK_SIZE + 1; ++i) {
        // Stage one element of A's tile; zero-pad outside the matrix.
        int temp = i * BLOCK_SIZE + threadIdx.x;
        if (row < A.height && temp < A.width)
            As[threadIdx.y][threadIdx.x] = A.elements[row * A.width + temp];
        else
            As[threadIdx.y][threadIdx.x] = 0.0f;
        // Stage one element of B's tile; zero-pad outside the matrix.
        temp = i * BLOCK_SIZE + threadIdx.y;
        if (col < B.width && temp < B.height)
            Bs[threadIdx.y][threadIdx.x] = B.elements[temp * B.width + col];
        else
            Bs[threadIdx.y][threadIdx.x] = 0.0f;
        // Barrier outside any divergent branch: tiles fully written before reads.
        __syncthreads();
        for (int j = 0; j < BLOCK_SIZE; ++j)
            Cvalue += As[threadIdx.y][j] * Bs[j][threadIdx.x];
        // Keep fast threads from overwriting tiles still being read.
        __syncthreads();
    }
    if (row < C.height && col < C.width)
        C.elements[row * C.width + col] = Cvalue;
    /*---Original code from CUDA C Programming Guide---*/
    /*
    // Block row and column
    int blockRow = blockIdx.y;
    int blockCol = blockIdx.x;
    // Each thread block computes one sub-matrix Csub of C
    Matrix Csub = GetSubMatrix(C, blockRow, blockCol);
    // Each thread computes one element of Csub
    // by accumulating results into Cvalue
    float Cvalue = 0;
    // Thread row and column within Csub
    int row = threadIdx.y;
    int col = threadIdx.x;
    // Loop over all the sub-matrices of A and B that are
    // required to compute Csub
    // Multiply each pair of sub-matrices together
    // and accumulate the results
    for (int m = 0; m < (A.width / BLOCK_SIZE); ++m) {
        // Get sub-matrix Asub of A
        Matrix Asub = GetSubMatrix(A, blockRow, m);
        // Get sub-matrix Bsub of B
        Matrix Bsub = GetSubMatrix(B, m, blockCol);
        // Shared memory used to store Asub and Bsub respectively
        __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
        __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
        // Load Asub and Bsub from device memory to shared memory
        // Each thread loads one element of each sub-matrix
        As[row][col] = GetElement(Asub, row, col);
        Bs[row][col] = GetElement(Bsub, row, col);
        // Synchronize to make sure the sub-matrices are loaded
        // before starting the computation
        __syncthreads();
        // Multiply Asub and Bsub together
        for (int e = 0; e < BLOCK_SIZE; ++e)
            Cvalue += As[row][e] * Bs[e][col];
        // Synchronize to make sure that the preceding
        // computation is done before loading two new
        // sub-matrices of A and B in the next iteration
        __syncthreads();
    }
    // Write Csub to device memory
    // Each thread writes one element
    SetElement(Csub, row, col, Cvalue);
    */
}
// Entry point: reads A.height, A.width, B.width from argv, fills A and B
// with random floats in [0, UP_BOUND), multiplies them via MatMul(), and
// prints the elapsed time plus the top-left 10x10 corner of each matrix.
int main(int argc, char const *argv[])
{
    clock_t t;
    Matrix A, B, C;
    int a1, a2, b1, b2;
    int i, j;
    srand(time(NULL));
    // Bail out on missing arguments; the original fell through and
    // dereferenced argv[1..3], which is undefined behavior.
    if (argc < 4) {
        cout << "Usage: ./accuracy.o A.height A.width B.width" << endl;
        return 1;
    }
    // Get dimensions of A and B
    // Run $ ./matrixMul 1 1000000 400
    a1 = atoi(argv[1]); // A's height
    a2 = atoi(argv[2]); // A's width
    b1 = a2;            // B's height must equal A's width
    b2 = atoi(argv[3]); // B's width
    A.height = a1;
    A.width = A.stride = a2;
    A.elements = new float[A.width * A.height];
    B.height = b1;
    B.width = B.stride = b2;
    B.elements = new float[B.width * B.height];
    C.height = A.height;
    C.width = C.stride = B.width;
    C.elements = new float[C.width * C.height];
    // Fill A and B with random floats
    for (i = 0; i < A.height; ++i)
        for (j = 0; j < A.width; ++j)
            A.elements[i * A.width + j] = ((float)rand() / (float)RAND_MAX) * UP_BOUND;
    for (i = 0; i < B.height; ++i)
        for (j = 0; j < B.width; ++j)
            B.elements[i * B.width + j] = ((float)rand() / (float)RAND_MAX) * UP_BOUND;
    // Call MatMul(), and therefore MatMulKernel()
    t = clock();
    MatMul(A, B, C);
    // Print time multiplication took (CPU clock, not wall time)
    t = clock() - t;
    cout << "It took me " << fixed << ((float)t)/CLOCKS_PER_SEC;
    cout << " seconds.\n" << endl;
    // Print the top-left corner (at most 10x10) of A, B, and C
    for (i = 0; i < min(10, A.height); ++i) {
        for (j = 0; j < min(10, A.width); ++j)
            cout << fixed << A.elements[i * A.width + j] << "\t";
        cout << endl;
    }
    cout << endl;
    for (i = 0; i < min(10, B.height); ++i) {
        for (j = 0; j < min(10, B.width); ++j)
            cout << fixed << B.elements[i * B.width + j] << "\t";
        cout << endl;
    }
    cout << endl;
    for (i = 0; i < min(10, C.height); ++i) {
        for (j = 0; j < min(10, C.width); ++j)
            cout << fixed << C.elements[i * C.width + j] << "\t";
        cout << endl;
    }
    cout << endl;
    delete[] A.elements;
    delete[] B.elements;
    delete[] C.elements;
    return 0;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /*
Reference: http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.
html#ixzz4CtH09yed
*/
#include <hip/hip_runtime.h>
#include <cstdlib>
#include <ctime>
#include <cstdio>
#include <iostream>
using namespace std;
// Generate random floats between 0 and UP_BOUND
#define UP_BOUND 100;
// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.stride + col)
typedef struct {
int width;
int height;
int stride;
float* elements;
} Matrix;
// Thread block size
#define BLOCK_SIZE 20
// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
// Get a matrix element
// Read the element of A at (row, col); A.stride is the row pitch in elements.
__device__ float GetElement(const Matrix A, int row, int col)
{
    const int idx = row * A.stride + col;
    return A.elements[idx];
}
// Set a matrix element
// Store `value` into A at (row, col); A.stride is the row pitch in elements.
__device__ void SetElement(Matrix A, int row, int col,
                           float value)
{
    const int idx = row * A.stride + col;
    A.elements[idx] = value;
}
// Get the BLOCK_SIZExBLOCK_SIZE sub-matrix Asub of A that is
// located col sub-matrices to the right and row sub-matrices down
// from the upper-left corner of A
// Build a view of the BLOCK_SIZE x BLOCK_SIZE sub-matrix of A located
// `row` tile-rows down and `col` tile-columns right of A's upper-left
// corner. No data is copied: the view aliases A's storage and stride.
__device__ Matrix GetSubMatrix(Matrix A, int row, int col)
{
    Matrix sub;
    sub.width  = BLOCK_SIZE;
    sub.height = BLOCK_SIZE;
    sub.stride = A.stride;
    // Linear offset of the tile's first element inside A's storage.
    const int origin = BLOCK_SIZE * (A.stride * row + col);
    sub.elements = A.elements + origin;
    return sub;
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
// Load A and B to device memory
Matrix d_A;
d_A.width = d_A.stride = A.width;
d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
hipError_t err = hipMalloc(&d_A.elements, size);
cout << "CUDA malloc A: " << hipGetErrorString(err) << endl;
err = hipMemcpy(d_A.elements, A.elements, size, hipMemcpyHostToDevice);
cout << "Copy A to device: " << hipGetErrorString(err) << "\n" << endl;
Matrix d_B;
d_B.width = d_B.stride = B.width;
d_B.height = B.height;
size = B.width * B.height * sizeof(float);
err = hipMalloc(&d_B.elements, size);
cout << "CUDA malloc B: " << hipGetErrorString(err) << endl;
err = hipMemcpy(d_B.elements, B.elements, size, hipMemcpyHostToDevice);
cout << "Copy B to device: " << hipGetErrorString(err) << "\n" << endl;
// Allocate C in device memory
Matrix d_C;
d_C.width = d_C.stride = C.width;
d_C.height = C.height;
size = C.width * C.height * sizeof(float);
err = hipMalloc(&d_C.elements, size);
cout << "CUDA malloc C: " << hipGetErrorString(err) << endl;
// Invoke kernel
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
err = hipDeviceSynchronize();
cout << "Run kernel: " << hipGetErrorString(err) << endl;
// Read C from device memory
err = hipMemcpy(C.elements, d_C.elements, size, hipMemcpyDeviceToHost);
cout << "Copy C off of device: " << hipGetErrorString(err) << "\n" << endl;
// Free device memory
hipFree(d_A.elements);
hipFree(d_B.elements);
hipFree(d_C.elements);
}
// Matrix multiplication kernel called by MatMul()
// Matrix multiplication kernel called by MatMul().
// One thread computes one element of C. Tiles of A and B are staged in
// shared memory; every global load is bounds-checked and out-of-range tile
// slots are zero-filled, so dimensions need not be multiples of BLOCK_SIZE.
// Expects a 2D launch with blockDim == (BLOCK_SIZE, BLOCK_SIZE).
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
    // Shared memory for the current tiles of A and B.
    __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
    int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    // Each thread accumulates one element of C in Cvalue.
    // 0.0f literals keep the arithmetic in single precision (0.0 is double).
    float Cvalue = 0.0f;
    // ceil(A.width / BLOCK_SIZE) tile steps.
    for (int i = 0; i < (A.width - 1)/BLOCK_SIZE + 1; ++i) {
        // Stage one element of A's tile; zero-pad outside the matrix.
        int temp = i * BLOCK_SIZE + threadIdx.x;
        if (row < A.height && temp < A.width)
            As[threadIdx.y][threadIdx.x] = A.elements[row * A.width + temp];
        else
            As[threadIdx.y][threadIdx.x] = 0.0f;
        // Stage one element of B's tile; zero-pad outside the matrix.
        temp = i * BLOCK_SIZE + threadIdx.y;
        if (col < B.width && temp < B.height)
            Bs[threadIdx.y][threadIdx.x] = B.elements[temp * B.width + col];
        else
            Bs[threadIdx.y][threadIdx.x] = 0.0f;
        // Barrier outside any divergent branch: tiles fully written before reads.
        __syncthreads();
        for (int j = 0; j < BLOCK_SIZE; ++j)
            Cvalue += As[threadIdx.y][j] * Bs[j][threadIdx.x];
        // Keep fast threads from overwriting tiles still being read.
        __syncthreads();
    }
    if (row < C.height && col < C.width)
        C.elements[row * C.width + col] = Cvalue;
    /*---Original code from CUDA C Programming Guide---*/
    /*
    // Block row and column
    int blockRow = blockIdx.y;
    int blockCol = blockIdx.x;
    // Each thread block computes one sub-matrix Csub of C
    Matrix Csub = GetSubMatrix(C, blockRow, blockCol);
    // Each thread computes one element of Csub
    // by accumulating results into Cvalue
    float Cvalue = 0;
    // Thread row and column within Csub
    int row = threadIdx.y;
    int col = threadIdx.x;
    // Loop over all the sub-matrices of A and B that are
    // required to compute Csub
    // Multiply each pair of sub-matrices together
    // and accumulate the results
    for (int m = 0; m < (A.width / BLOCK_SIZE); ++m) {
        // Get sub-matrix Asub of A
        Matrix Asub = GetSubMatrix(A, blockRow, m);
        // Get sub-matrix Bsub of B
        Matrix Bsub = GetSubMatrix(B, m, blockCol);
        // Shared memory used to store Asub and Bsub respectively
        __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
        __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
        // Load Asub and Bsub from device memory to shared memory
        // Each thread loads one element of each sub-matrix
        As[row][col] = GetElement(Asub, row, col);
        Bs[row][col] = GetElement(Bsub, row, col);
        // Synchronize to make sure the sub-matrices are loaded
        // before starting the computation
        __syncthreads();
        // Multiply Asub and Bsub together
        for (int e = 0; e < BLOCK_SIZE; ++e)
            Cvalue += As[row][e] * Bs[e][col];
        // Synchronize to make sure that the preceding
        // computation is done before loading two new
        // sub-matrices of A and B in the next iteration
        __syncthreads();
    }
    // Write Csub to device memory
    // Each thread writes one element
    SetElement(Csub, row, col, Cvalue);
    */
}
// Entry point: reads A.height, A.width, B.width from argv, fills A and B
// with random floats in [0, UP_BOUND), multiplies them via MatMul(), and
// prints the elapsed time plus the top-left 10x10 corner of each matrix.
int main(int argc, char const *argv[])
{
    clock_t t;
    Matrix A, B, C;
    int a1, a2, b1, b2;
    int i, j;
    srand(time(NULL));
    // Bail out on missing arguments; the original fell through and
    // dereferenced argv[1..3], which is undefined behavior.
    if (argc < 4) {
        cout << "Usage: ./accuracy.o A.height A.width B.width" << endl;
        return 1;
    }
    // Get dimensions of A and B
    // Run $ ./matrixMul 1 1000000 400
    a1 = atoi(argv[1]); // A's height
    a2 = atoi(argv[2]); // A's width
    b1 = a2;            // B's height must equal A's width
    b2 = atoi(argv[3]); // B's width
    A.height = a1;
    A.width = A.stride = a2;
    A.elements = new float[A.width * A.height];
    B.height = b1;
    B.width = B.stride = b2;
    B.elements = new float[B.width * B.height];
    C.height = A.height;
    C.width = C.stride = B.width;
    C.elements = new float[C.width * C.height];
    // Fill A and B with random floats
    for (i = 0; i < A.height; ++i)
        for (j = 0; j < A.width; ++j)
            A.elements[i * A.width + j] = ((float)rand() / (float)RAND_MAX) * UP_BOUND;
    for (i = 0; i < B.height; ++i)
        for (j = 0; j < B.width; ++j)
            B.elements[i * B.width + j] = ((float)rand() / (float)RAND_MAX) * UP_BOUND;
    // Call MatMul(), and therefore MatMulKernel()
    t = clock();
    MatMul(A, B, C);
    // Print time multiplication took (CPU clock, not wall time)
    t = clock() - t;
    cout << "It took me " << fixed << ((float)t)/CLOCKS_PER_SEC;
    cout << " seconds.\n" << endl;
    // Print the top-left corner (at most 10x10) of A, B, and C
    for (i = 0; i < min(10, A.height); ++i) {
        for (j = 0; j < min(10, A.width); ++j)
            cout << fixed << A.elements[i * A.width + j] << "\t";
        cout << endl;
    }
    cout << endl;
    for (i = 0; i < min(10, B.height); ++i) {
        for (j = 0; j < min(10, B.width); ++j)
            cout << fixed << B.elements[i * B.width + j] << "\t";
        cout << endl;
    }
    cout << endl;
    for (i = 0; i < min(10, C.height); ++i) {
        for (j = 0; j < min(10, C.width); ++j)
            cout << fixed << C.elements[i * C.width + j] << "\t";
        cout << endl;
    }
    cout << endl;
    delete[] A.elements;
    delete[] B.elements;
    delete[] C.elements;
    return 0;
}
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z12MatMulKernel6MatrixS_S_
.globl _Z12MatMulKernel6MatrixS_S_
.p2align 8
.type _Z12MatMulKernel6MatrixS_S_,@function
_Z12MatMulKernel6MatrixS_S_:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x0
s_load_b64 s[8:9], s[0:1], 0x40
v_bfe_u32 v3, v0, 10, 10
v_and_b32_e32 v4, 0x3ff, v0
s_delay_alu instid0(VALU_DEP_1)
v_mad_u64_u32 v[0:1], null, s15, 20, v[3:4]
v_mad_u64_u32 v[1:2], null, s14, 20, v[4:5]
v_mov_b32_e32 v2, 0
s_waitcnt lgkmcnt(0)
s_cmpk_lt_i32 s3, 0xffee
s_cbranch_scc1 .LBB0_14
s_clause 0x2
s_load_b32 s2, s[0:1], 0x4
s_load_b128 s[4:7], s[0:1], 0x10
s_load_b64 s[10:11], s[0:1], 0x28
v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v9, 2, v4
s_add_i32 s13, s3, -1
v_mov_b32_e32 v7, 0
v_mul_lo_u32 v6, s3, v0
s_delay_alu instid0(VALU_DEP_3)
v_add_nc_u32_e32 v8, 0x640, v9
s_mul_hi_i32 s13, s13, 0x66666667
v_mul_u32_u24_e32 v5, 0x50, v3
s_lshr_b32 s14, s13, 31
s_ashr_i32 s13, s13, 3
v_mad_u32_u24 v9, v3, 0x50, v9
v_mad_u32_u24 v10, v3, 0x50, v8
s_add_i32 s13, s13, s14
s_mov_b32 s12, 0
s_max_i32 s13, s13, 0
s_waitcnt lgkmcnt(0)
v_cmp_gt_i32_e32 vcc_lo, s2, v0
v_cmp_gt_i32_e64 s2, s6, v1
s_xor_b32 s14, vcc_lo, -1
s_delay_alu instid0(VALU_DEP_1)
s_xor_b32 s2, s2, -1
.LBB0_2:
s_mul_i32 s15, s12, 20
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v11, s15, v4
v_cmp_le_i32_e32 vcc_lo, s3, v11
s_or_b32 s16, s14, vcc_lo
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_saveexec_b32 s17, s16
s_xor_b32 s16, exec_lo, s17
s_cbranch_execz .LBB0_4
ds_store_b32 v9, v7
.LBB0_4:
s_and_not1_saveexec_b32 s16, s16
s_cbranch_execz .LBB0_6
v_add_nc_u32_e32 v11, v11, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v12, 31, v11
v_lshlrev_b64 v[11:12], 2, v[11:12]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v11, vcc_lo, s4, v11
v_add_co_ci_u32_e32 v12, vcc_lo, s5, v12, vcc_lo
global_load_b32 v11, v[11:12], off
s_waitcnt vmcnt(0)
ds_store_b32 v9, v11
.LBB0_6:
s_or_b32 exec_lo, exec_lo, s16
v_add_nc_u32_e32 v11, s15, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_cmp_le_i32_e32 vcc_lo, s7, v11
s_or_b32 s15, s2, vcc_lo
s_and_saveexec_b32 s16, s15
s_delay_alu instid0(SALU_CYCLE_1)
s_xor_b32 s15, exec_lo, s16
s_cbranch_execz .LBB0_8
ds_store_b32 v10, v7
.LBB0_8:
s_and_not1_saveexec_b32 s15, s15
s_cbranch_execz .LBB0_10
v_mad_u64_u32 v[12:13], null, v11, s6, v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v13, 31, v12
v_lshlrev_b64 v[11:12], 2, v[12:13]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v11, vcc_lo, s10, v11
v_add_co_ci_u32_e32 v12, vcc_lo, s11, v12, vcc_lo
global_load_b32 v11, v[11:12], off
s_waitcnt vmcnt(0)
ds_store_b32 v10, v11
.LBB0_10:
s_or_b32 exec_lo, exec_lo, s15
v_mov_b32_e32 v11, v8
s_mov_b32 s15, 0
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
.LBB0_11:
v_add_nc_u32_e32 v12, s15, v5
s_add_i32 s15, s15, 4
ds_load_b32 v13, v11
ds_load_b32 v12, v12
v_add_nc_u32_e32 v11, 0x50, v11
s_cmpk_eq_i32 s15, 0x50
s_waitcnt lgkmcnt(0)
v_fmac_f32_e32 v2, v12, v13
s_cbranch_scc0 .LBB0_11
s_add_i32 s15, s12, 1
s_cmp_eq_u32 s12, s13
s_barrier
buffer_gl0_inv
s_cbranch_scc1 .LBB0_14
s_mov_b32 s12, s15
s_branch .LBB0_2
.LBB0_14:
s_load_b64 s[2:3], s[0:1], 0x30
s_waitcnt lgkmcnt(0)
v_cmp_gt_i32_e32 vcc_lo, s3, v0
v_cmp_gt_i32_e64 s0, s2, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s0, vcc_lo, s0
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_16
v_mad_u64_u32 v[3:4], null, s2, v0, v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v4, 31, v3
v_lshlrev_b64 v[0:1], 2, v[3:4]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s8, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s9, v1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB0_16:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z12MatMulKernel6MatrixS_S_
.amdhsa_group_segment_fixed_size 3200
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 72
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 14
.amdhsa_next_free_sgpr 18
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z12MatMulKernel6MatrixS_S_, .Lfunc_end0-_Z12MatMulKernel6MatrixS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 24
.value_kind: by_value
- .offset: 24
.size: 24
.value_kind: by_value
- .offset: 48
.size: 24
.value_kind: by_value
.group_segment_fixed_size: 3200
.kernarg_segment_align: 8
.kernarg_segment_size: 72
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z12MatMulKernel6MatrixS_S_
.private_segment_fixed_size: 0
.sgpr_count: 20
.sgpr_spill_count: 0
.symbol: _Z12MatMulKernel6MatrixS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 14
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /*
Reference: http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.
html#ixzz4CtH09yed
*/
#include <hip/hip_runtime.h>
#include <cstdlib>
#include <ctime>
#include <cstdio>
#include <iostream>
using namespace std;
// Generate random floats between 0 and UP_BOUND
#define UP_BOUND 100;
// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.stride + col)
typedef struct {
int width;
int height;
int stride;
float* elements;
} Matrix;
// Thread block size
#define BLOCK_SIZE 20
// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
// Get a matrix element
// Read the element of A at (row, col); A.stride is the row pitch in elements.
__device__ float GetElement(const Matrix A, int row, int col)
{
    const int idx = row * A.stride + col;
    return A.elements[idx];
}
// Set a matrix element
// Store `value` into A at (row, col); A.stride is the row pitch in elements.
__device__ void SetElement(Matrix A, int row, int col,
                           float value)
{
    const int idx = row * A.stride + col;
    A.elements[idx] = value;
}
// Get the BLOCK_SIZExBLOCK_SIZE sub-matrix Asub of A that is
// located col sub-matrices to the right and row sub-matrices down
// from the upper-left corner of A
// Build a view of the BLOCK_SIZE x BLOCK_SIZE sub-matrix of A located
// `row` tile-rows down and `col` tile-columns right of A's upper-left
// corner. No data is copied: the view aliases A's storage and stride.
__device__ Matrix GetSubMatrix(Matrix A, int row, int col)
{
    Matrix sub;
    sub.width  = BLOCK_SIZE;
    sub.height = BLOCK_SIZE;
    sub.stride = A.stride;
    // Linear offset of the tile's first element inside A's storage.
    const int origin = BLOCK_SIZE * (A.stride * row + col);
    sub.elements = A.elements + origin;
    return sub;
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
// Load A and B to device memory
Matrix d_A;
d_A.width = d_A.stride = A.width;
d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
hipError_t err = hipMalloc(&d_A.elements, size);
cout << "CUDA malloc A: " << hipGetErrorString(err) << endl;
err = hipMemcpy(d_A.elements, A.elements, size, hipMemcpyHostToDevice);
cout << "Copy A to device: " << hipGetErrorString(err) << "\n" << endl;
Matrix d_B;
d_B.width = d_B.stride = B.width;
d_B.height = B.height;
size = B.width * B.height * sizeof(float);
err = hipMalloc(&d_B.elements, size);
cout << "CUDA malloc B: " << hipGetErrorString(err) << endl;
err = hipMemcpy(d_B.elements, B.elements, size, hipMemcpyHostToDevice);
cout << "Copy B to device: " << hipGetErrorString(err) << "\n" << endl;
// Allocate C in device memory
Matrix d_C;
d_C.width = d_C.stride = C.width;
d_C.height = C.height;
size = C.width * C.height * sizeof(float);
err = hipMalloc(&d_C.elements, size);
cout << "CUDA malloc C: " << hipGetErrorString(err) << endl;
// Invoke kernel
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
err = hipDeviceSynchronize();
cout << "Run kernel: " << hipGetErrorString(err) << endl;
// Read C from device memory
err = hipMemcpy(C.elements, d_C.elements, size, hipMemcpyDeviceToHost);
cout << "Copy C off of device: " << hipGetErrorString(err) << "\n" << endl;
// Free device memory
hipFree(d_A.elements);
hipFree(d_B.elements);
hipFree(d_C.elements);
}
// Matrix multiplication kernel called by MatMul()
//
// Computes C = A * B with a shared-memory tiled algorithm.
// Launch contract (set up by MatMul()):
//   - blockDim must be (BLOCK_SIZE, BLOCK_SIZE); each thread produces at most
//     one element of C.
//   - grid is 2D: blockIdx.x walks C's columns, blockIdx.y walks C's rows.
// Matrices are stored row-major in a flat float array; indexing below uses
// `width` directly, so the kernel assumes stride == width (the callers in this
// file always set the two fields equal — NOTE(review): confirm if reused).
// Partial tiles at the right/bottom edges are handled by zero-padding the
// shared tiles, so A.width need not be a multiple of BLOCK_SIZE.
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
	// Shared memory used to store Asub and Bsub respectively
	__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
	__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
	// Global row/column of the C element this thread is responsible for.
	int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
	int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
	// Each thread computes one element of Csub
	// by accumulating results into Cvalue
	float Cvalue = 0.0;
	// Walk the tiles along A's width / B's height; the ceil-division
	// (A.width - 1)/BLOCK_SIZE + 1 includes the final partial tile.
	for (int i = 0; i < (A.width - 1)/BLOCK_SIZE + 1; ++i) {
		// Column of A this thread loads for the current tile.
		int temp = i * BLOCK_SIZE + threadIdx.x;
		// Out-of-range loads are replaced by 0 so they contribute nothing
		// to the dot product (edge-tile padding).
		if (row < A.height && temp < A.width)
			As[threadIdx.y][threadIdx.x] = A.elements[row * A.width + temp];
		else
			As[threadIdx.y][threadIdx.x] = 0.0;
		// Row of B this thread loads for the current tile.
		temp = i * BLOCK_SIZE + threadIdx.y;
		if (col < B.width && temp < B.height)
			Bs[threadIdx.y][threadIdx.x] = B.elements[temp * B.width + col];
		else
			Bs[threadIdx.y][threadIdx.x] = 0.0;
		// Barrier: the whole tile must be loaded before anyone reads it.
		// Reached by all threads (the guards above only affect what is
		// stored, not control flow past this point).
		__syncthreads();
		// Accumulate this tile's contribution to the dot product.
		for (int j = 0; j < BLOCK_SIZE; ++j)
			Cvalue += As[threadIdx.y][j] * Bs[j][threadIdx.x];
		// Barrier: finish reading the tile before the next iteration
		// overwrites it.
		__syncthreads();
	}
	// Guarded write: threads mapped past C's edge store nothing.
	if (row < C.height && col < C.width)
		C.elements[row * C.width + col] = Cvalue;
	/*---Original code from CUDA C Programming Guide---*/
	/*
	// Block row and column
	int blockRow = blockIdx.y;
	int blockCol = blockIdx.x;
	// Each thread block computes one sub-matrix Csub of C
	Matrix Csub = GetSubMatrix(C, blockRow, blockCol);
	// Each thread computes one element of Csub
	// by accumulating results into Cvalue
	float Cvalue = 0;
	// Thread row and column within Csub
	int row = threadIdx.y;
	int col = threadIdx.x;
	// Loop over all the sub-matrices of A and B that are
	// required to compute Csub
	// Multiply each pair of sub-matrices together
	// and accumulate the results
	for (int m = 0; m < (A.width / BLOCK_SIZE); ++m) {
		// Get sub-matrix Asub of A
		Matrix Asub = GetSubMatrix(A, blockRow, m);
		// Get sub-matrix Bsub of B
		Matrix Bsub = GetSubMatrix(B, m, blockCol);
		// Shared memory used to store Asub and Bsub respectively
		__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
		__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
		// Load Asub and Bsub from device memory to shared memory
		// Each thread loads one element of each sub-matrix
		As[row][col] = GetElement(Asub, row, col);
		Bs[row][col] = GetElement(Bsub, row, col);
		// Synchronize to make sure the sub-matrices are loaded
		// before starting the computation
		__syncthreads();
		// Multiply Asub and Bsub together
		for (int e = 0; e < BLOCK_SIZE; ++e)
			Cvalue += As[row][e] * Bs[e][col];
		// Synchronize to make sure that the preceding
		// computation is done before loading two new
		// sub-matrices of A and B in the next iteration
		__syncthreads();
	}
	// Write Csub to device memory
	// Each thread writes one element
	SetElement(Csub, row, col, Cvalue);
	*/
}
// Driver: builds random matrices A (a1 x a2) and B (a2 x b2), multiplies them
// on the device via MatMul(), times the whole round trip, and prints the
// top-left corner (at most 10x10) of A, B and C for a quick visual check.
//
// Usage: ./accuracy.o A.height A.width B.width
// Returns 0 on success, 1 on bad command-line arguments.
int main(int argc, char const *argv[])
{
	clock_t t;
	Matrix A, B, C;
	int a1, a2, b1, b2;
	int i, j;
	srand(time(NULL));
	if (argc < 4) {
		cout << "Usage: ./accuracy.o A.height A.width B.width" << endl;
		// BUG FIX: the original printed the usage message but then fell
		// through and dereferenced argv[1..3] anyway — undefined behavior
		// (typically a segfault). Bail out instead.
		return 1;
	}
	// Get dimensions of A and B
	// Run $ ./matrixMul 1 1000000 400
	a1 = atoi(argv[1]); // A's height
	a2 = atoi(argv[2]); // A's width
	b1 = a2;            // B's height must equal A's width
	b2 = atoi(argv[3]); // B's width
	// atoi() yields 0 on garbage; reject non-positive dimensions before
	// they turn into zero-sized or negative-sized allocations.
	if (a1 <= 0 || a2 <= 0 || b2 <= 0) {
		cout << "Error: all dimensions must be positive integers" << endl;
		return 1;
	}
	// Allocate host matrices; rows are contiguous, so stride == width.
	A.height = a1;
	A.width = A.stride = a2;
	A.elements = new float[A.width * A.height];
	B.height = b1;
	B.width = B.stride = b2;
	B.elements = new float[B.width * B.height];
	C.height = A.height;
	C.width = C.stride = B.width;
	C.elements = new float[C.width * C.height];
	// Fill A and B with random floats in [0, UP_BOUND]
	for (i = 0; i < A.height; ++i)
		for (j = 0; j < A.width; ++j)
			A.elements[i * A.width + j] = ((float)rand() / (float)RAND_MAX) * UP_BOUND;
	for (i = 0; i < B.height; ++i)
		for (j = 0; j < B.width; ++j)
			B.elements[i * B.width + j] = ((float)rand() / (float)RAND_MAX) * UP_BOUND;
	// Call MatMul(), and therefore MatMulKernel(); the timing below covers
	// device allocation, copies and the kernel. NOTE(review): clock()
	// measures processor time, not wall time — acceptable here because the
	// MatMul() calls block, but cudaEvent/steady_clock would be stricter.
	t = clock();
	MatMul(A, B, C);
	// Print time multiplication took
	t = clock() - t;
	cout << "It took me " << fixed << ((float)t)/CLOCKS_PER_SEC;
	cout << " seconds.\n" << endl;
	// Print at most the top-left 10x10 corner of A, B, and C
	for (i = 0; i < min(10, A.height); ++i) {
		for (j = 0; j < min(10, A.width); ++j)
			cout << fixed << A.elements[i * A.width + j] << "\t";
		cout << endl;
	}
	cout << endl;
	for (i = 0; i < min(10, B.height); ++i) {
		for (j = 0; j < min(10, B.width); ++j)
			cout << fixed << B.elements[i * B.width + j] << "\t";
		cout << endl;
	}
	cout << endl;
	for (i = 0; i < min(10, C.height); ++i) {
		for (j = 0; j < min(10, C.width); ++j)
			cout << fixed << C.elements[i * C.width + j] << "\t";
		cout << endl;
	}
	cout << endl;
	// Release host buffers (device buffers are freed inside MatMul()).
	delete[] A.elements;
	delete[] B.elements;
	delete[] C.elements;
	return 0;
}
.file "share.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z6MatMul6MatrixS_S_ # -- Begin function _Z6MatMul6MatrixS_S_
.p2align 4, 0x90
.type _Z6MatMul6MatrixS_S_,@function
_Z6MatMul6MatrixS_S_: # @_Z6MatMul6MatrixS_S_
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $248, %rsp
.cfi_def_cfa_offset 304
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl 304(%rsp), %eax
movl 308(%rsp), %r15d
movl %eax, 64(%rsp)
movl %eax, 56(%rsp)
movl %r15d, 60(%rsp)
imull %r15d, %eax
movslq %eax, %rbx
shlq $2, %rbx
leaq 72(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
movl %eax, %ebp
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $15, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl %ebp, %edi
callq hipGetErrorString
testq %rax, %rax
je .LBB0_1
# %bb.2:
movq %rax, %rdi
movq %rax, %r14
callq strlen
movl $_ZSt4cout, %edi
movq %r14, %rsi
movq %rax, %rdx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
jmp .LBB0_3
.LBB0_1:
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
leaq _ZSt4cout(%rax), %rdi
movl _ZSt4cout+32(%rax), %esi
orl $1, %esi
callq _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate
.LBB0_3: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r14
testq %r14, %r14
je .LBB0_52
# %bb.4: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
leaq 304(%rsp), %r12
cmpb $0, 56(%r14)
je .LBB0_6
# %bb.5:
movzbl 67(%r14), %eax
jmp .LBB0_7
.LBB0_6:
movq %r14, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
.LBB0_7: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq 72(%rsp), %rdi
movq 16(%r12), %rsi
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
movl %eax, %ebx
movl $_ZSt4cout, %edi
movl $.L.str.1, %esi
movl $18, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl %ebx, %edi
callq hipGetErrorString
testq %rax, %rax
je .LBB0_8
# %bb.9:
movq %rax, %rdi
movq %rax, %rbx
callq strlen
movl $_ZSt4cout, %edi
movq %rbx, %rsi
movq %rax, %rdx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
jmp .LBB0_10
.LBB0_8:
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
leaq _ZSt4cout(%rax), %rdi
movl _ZSt4cout+32(%rax), %esi
orl $1, %esi
callq _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate
.LBB0_10: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit20
movl $_ZSt4cout, %edi
movl $.L.str.2, %esi
movl $1, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %rbx
testq %rbx, %rbx
je .LBB0_52
# %bb.11: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i32
leaq 328(%rsp), %r12
cmpb $0, 56(%rbx)
je .LBB0_13
# %bb.12:
movzbl 67(%rbx), %eax
jmp .LBB0_14
.LBB0_13:
movq %rbx, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
.LBB0_14: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit35
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl (%r12), %r13d
movl %r13d, 40(%rsp)
movl %r13d, 32(%rsp)
movl 4(%r12), %eax
movl %eax, 36(%rsp)
imull %r13d, %eax
movslq %eax, %rbx
shlq $2, %rbx
leaq 48(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
movl %eax, %ebp
movl $_ZSt4cout, %edi
movl $.L.str.3, %esi
movl $15, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl %ebp, %edi
callq hipGetErrorString
testq %rax, %rax
je .LBB0_15
# %bb.16:
movq %rax, %rdi
movq %rax, %r14
callq strlen
movl $_ZSt4cout, %edi
movq %r14, %rsi
movq %rax, %rdx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
jmp .LBB0_17
.LBB0_15:
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
leaq _ZSt4cout(%rax), %rdi
movl _ZSt4cout+32(%rax), %esi
orl $1, %esi
callq _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate
.LBB0_17: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit22
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r14
testq %r14, %r14
je .LBB0_52
# %bb.18: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i37
cmpb $0, 56(%r14)
je .LBB0_20
# %bb.19:
movzbl 67(%r14), %eax
jmp .LBB0_21
.LBB0_20:
movq %r14, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
.LBB0_21: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit40
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq 48(%rsp), %rdi
movq 16(%r12), %rsi
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
movl %eax, %ebx
movl $_ZSt4cout, %edi
movl $.L.str.4, %esi
movl $18, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl %ebx, %edi
callq hipGetErrorString
testq %rax, %rax
je .LBB0_22
# %bb.23:
movq %rax, %rdi
movq %rax, %rbx
callq strlen
movl $_ZSt4cout, %edi
movq %rbx, %rsi
movq %rax, %rdx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
jmp .LBB0_24
.LBB0_22:
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
leaq _ZSt4cout(%rax), %rdi
movl _ZSt4cout+32(%rax), %esi
orl $1, %esi
callq _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate
.LBB0_24: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit24
movl $_ZSt4cout, %edi
movl $.L.str.2, %esi
movl $1, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %rbx
testq %rbx, %rbx
je .LBB0_52
# %bb.25: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i42
leaq 352(%rsp), %r12
cmpb $0, 56(%rbx)
je .LBB0_27
# %bb.26:
movzbl 67(%rbx), %eax
jmp .LBB0_28
.LBB0_27:
movq %rbx, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
.LBB0_28: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit45
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl (%r12), %eax
movl %eax, 16(%rsp)
movl %eax, 8(%rsp)
movl 4(%r12), %ecx
movl %ecx, 12(%rsp)
imull %eax, %ecx
movslq %ecx, %rbx
shlq $2, %rbx
leaq 24(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
movl %eax, %ebp
movl $_ZSt4cout, %edi
movl $.L.str.5, %esi
movl $15, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl %ebp, %edi
callq hipGetErrorString
testq %rax, %rax
je .LBB0_29
# %bb.30:
movq %rax, %rdi
movq %rax, %r14
callq strlen
movl $_ZSt4cout, %edi
movq %r14, %rsi
movq %rax, %rdx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
jmp .LBB0_31
.LBB0_29:
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
leaq _ZSt4cout(%rax), %rdi
movl _ZSt4cout+32(%rax), %esi
orl $1, %esi
callq _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate
.LBB0_31: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit26
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r14
testq %r14, %r14
je .LBB0_52
# %bb.32: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i47
cmpb $0, 56(%r14)
je .LBB0_34
# %bb.33:
movzbl 67(%r14), %eax
jmp .LBB0_35
.LBB0_34:
movq %r14, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
.LBB0_35: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit50
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl %r13d, %eax
movl $3435973837, %ecx # imm = 0xCCCCCCCD
imulq %rcx, %rax
shrq $36, %rax
movl %r15d, %edx
imulq %rcx, %rdx
shrq $4, %rdx
movabsq $1152921500311879680, %rdi # imm = 0xFFFFFFF00000000
andq %rdx, %rdi
orq %rax, %rdi
movabsq $85899345940, %rdx # imm = 0x1400000014
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB0_37
# %bb.36:
movq 72(%rsp), %rax
movq %rax, 176(%rsp)
movups 56(%rsp), %xmm0
movaps %xmm0, 160(%rsp)
movq 48(%rsp), %rax
movq %rax, 208(%rsp)
movups 32(%rsp), %xmm0
movaps %xmm0, 192(%rsp)
movq 24(%rsp), %rax
movq %rax, 240(%rsp)
movups 8(%rsp), %xmm0
movaps %xmm0, 224(%rsp)
leaq 160(%rsp), %rax
movq %rax, 128(%rsp)
leaq 192(%rsp), %rax
movq %rax, 136(%rsp)
leaq 224(%rsp), %rax
movq %rax, 144(%rsp)
leaq 112(%rsp), %rdi
leaq 96(%rsp), %rsi
leaq 88(%rsp), %rdx
leaq 80(%rsp), %rcx
callq __hipPopCallConfiguration
movq 112(%rsp), %rsi
movl 120(%rsp), %edx
movq 96(%rsp), %rcx
movl 104(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z12MatMulKernel6MatrixS_S_, %edi
pushq 80(%rsp)
.cfi_adjust_cfa_offset 8
pushq 96(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB0_37:
callq hipDeviceSynchronize
movl %eax, %ebp
movl $_ZSt4cout, %edi
movl $.L.str.6, %esi
movl $12, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl %ebp, %edi
callq hipGetErrorString
testq %rax, %rax
je .LBB0_38
# %bb.39:
movq %rax, %rdi
movq %rax, %r14
callq strlen
movl $_ZSt4cout, %edi
movq %r14, %rsi
movq %rax, %rdx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
jmp .LBB0_40
.LBB0_38:
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
leaq _ZSt4cout(%rax), %rdi
movl _ZSt4cout+32(%rax), %esi
orl $1, %esi
callq _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate
.LBB0_40: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit28
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r14
testq %r14, %r14
je .LBB0_52
# %bb.41: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i52
cmpb $0, 56(%r14)
je .LBB0_43
# %bb.42:
movzbl 67(%r14), %eax
jmp .LBB0_44
.LBB0_43:
movq %r14, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
.LBB0_44: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit55
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq 16(%r12), %rdi
movq 24(%rsp), %rsi
movq %rbx, %rdx
movl $2, %ecx
callq hipMemcpy
movl %eax, %ebx
movl $_ZSt4cout, %edi
movl $.L.str.7, %esi
movl $22, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl %ebx, %edi
callq hipGetErrorString
testq %rax, %rax
je .LBB0_45
# %bb.46:
movq %rax, %rdi
movq %rax, %rbx
callq strlen
movl $_ZSt4cout, %edi
movq %rbx, %rsi
movq %rax, %rdx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
jmp .LBB0_47
.LBB0_45:
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
leaq _ZSt4cout(%rax), %rdi
movl _ZSt4cout+32(%rax), %esi
orl $1, %esi
callq _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate
.LBB0_47: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit30
movl $_ZSt4cout, %edi
movl $.L.str.2, %esi
movl $1, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %rbx
testq %rbx, %rbx
je .LBB0_52
# %bb.48: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i57
cmpb $0, 56(%rbx)
je .LBB0_50
# %bb.49:
movzbl 67(%rbx), %eax
jmp .LBB0_51
.LBB0_50:
movq %rbx, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
.LBB0_51: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit60
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq 72(%rsp), %rdi
callq hipFree
movq 48(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
addq $248, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB0_52:
.cfi_def_cfa_offset 304
callq _ZSt16__throw_bad_castv
.Lfunc_end0:
.size _Z6MatMul6MatrixS_S_, .Lfunc_end0-_Z6MatMul6MatrixS_S_
.cfi_endproc
# -- End function
.globl _Z27__device_stub__MatMulKernel6MatrixS_S_ # -- Begin function _Z27__device_stub__MatMulKernel6MatrixS_S_
.p2align 4, 0x90
.type _Z27__device_stub__MatMulKernel6MatrixS_S_,@function
_Z27__device_stub__MatMulKernel6MatrixS_S_: # @_Z27__device_stub__MatMulKernel6MatrixS_S_
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
leaq 80(%rsp), %rax
movq %rax, 48(%rsp)
leaq 104(%rsp), %rax
movq %rax, 56(%rsp)
leaq 128(%rsp), %rax
movq %rax, 64(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z12MatMulKernel6MatrixS_S_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end1:
.size _Z27__device_stub__MatMulKernel6MatrixS_S_, .Lfunc_end1-_Z27__device_stub__MatMulKernel6MatrixS_S_
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI2_0:
.long 0x30000000 # float 4.65661287E-10
.LCPI2_1:
.long 0x42c80000 # float 100
.LCPI2_2:
.long 0x49742400 # float 1.0E+6
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $216, %rsp
.cfi_def_cfa_offset 272
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %rbx
movl %edi, %ebp
xorl %edi, %edi
callq time
movl %eax, %edi
callq srand
cmpl $3, %ebp
jg .LBB2_6
# %bb.1:
movl $_ZSt4cout, %edi
movl $.L.str.8, %esi
movl $44, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r14
testq %r14, %r14
je .LBB2_65
# %bb.2: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%r14)
je .LBB2_4
# %bb.3:
movzbl 67(%r14), %eax
jmp .LBB2_5
.LBB2_4:
movq %r14, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
.LBB2_5: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
.LBB2_6:
movq 8(%rbx), %rdi
xorl %r15d, %r15d
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r12
movq 16(%rbx), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r13
movq 24(%rbx), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r14
movl %r13d, %eax
imull %r12d, %eax
cltq
leaq (,%rax,4), %rdi
testl %eax, %eax
movq $-1, %rbx
cmovsq %rbx, %rdi
callq _Znam
movq %rax, 120(%rsp) # 8-byte Spill
movl %r14d, %eax
imull %r13d, %eax
cltq
leaq (,%rax,4), %rdi
testl %eax, %eax
cmovsq %rbx, %rdi
callq _Znam
movq %rax, 112(%rsp) # 8-byte Spill
movq %r14, 88(%rsp) # 8-byte Spill
movl %r14d, %eax
imull %r12d, %eax
cltq
leaq (,%rax,4), %rdi
testl %eax, %eax
cmovsq %rbx, %rdi
callq _Znam
movq %rax, 128(%rsp) # 8-byte Spill
movq %r12, 104(%rsp) # 8-byte Spill
testl %r12d, %r12d
movq %r13, 80(%rsp) # 8-byte Spill
jle .LBB2_12
# %bb.7: # %.preheader118.lr.ph
movl 104(%rsp), %ebx # 4-byte Reload
movl %r13d, %r14d
xorl %r12d, %r12d
jmp .LBB2_8
.p2align 4, 0x90
.LBB2_11: # %._crit_edge
# in Loop: Header=BB2_8 Depth=1
incq %r12
movq 80(%rsp), %r13 # 8-byte Reload
addl %r13d, %r15d
cmpq %rbx, %r12
je .LBB2_12
.LBB2_8: # %.preheader118
# =>This Loop Header: Depth=1
# Child Loop BB2_10 Depth 2
testl %r13d, %r13d
jle .LBB2_11
# %bb.9: # %.lr.ph
# in Loop: Header=BB2_8 Depth=1
movl %r15d, %eax
movq 120(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %r13
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB2_10: # Parent Loop BB2_8 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
movss .LCPI2_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss %xmm1, %xmm0
movss .LCPI2_1(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
mulss %xmm1, %xmm0
movss %xmm0, (%r13,%rbp,4)
incq %rbp
cmpq %rbp, %r14
jne .LBB2_10
jmp .LBB2_11
.LBB2_12: # %.preheader117
testl %r13d, %r13d
movq 88(%rsp), %rax # 8-byte Reload
jle .LBB2_18
# %bb.13: # %.preheader116.lr.ph
movl 80(%rsp), %ebx # 4-byte Reload
movl %eax, %r14d
xorl %r15d, %r15d
xorl %r12d, %r12d
jmp .LBB2_14
.p2align 4, 0x90
.LBB2_17: # %._crit_edge124
# in Loop: Header=BB2_14 Depth=1
incq %r12
movq 88(%rsp), %rax # 8-byte Reload
addl %eax, %r15d
cmpq %rbx, %r12
je .LBB2_18
.LBB2_14: # %.preheader116
# =>This Loop Header: Depth=1
# Child Loop BB2_16 Depth 2
testl %eax, %eax
jle .LBB2_17
# %bb.15: # %.lr.ph123
# in Loop: Header=BB2_14 Depth=1
movl %r15d, %eax
movq 112(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %r13
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB2_16: # Parent Loop BB2_14 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
movss .LCPI2_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss %xmm1, %xmm0
movss .LCPI2_1(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
mulss %xmm1, %xmm0
movss %xmm0, (%r13,%rbp,4)
incq %rbp
cmpq %rbp, %r14
jne .LBB2_16
jmp .LBB2_17
.LBB2_18: # %._crit_edge126
movq %rax, %rbx
callq clock
movq %rax, %r13
movq 80(%rsp), %rbp # 8-byte Reload
movl %ebp, 168(%rsp)
movq 104(%rsp), %rcx # 8-byte Reload
movl %ecx, 172(%rsp)
movl %ebp, 176(%rsp)
movq 120(%rsp), %rax # 8-byte Reload
movq %rax, 184(%rsp)
movl %ebx, 144(%rsp)
movl %ebp, 148(%rsp)
movl %ebx, 152(%rsp)
movq 112(%rsp), %rax # 8-byte Reload
movq %rax, 160(%rsp)
movl %ebx, 192(%rsp)
movl %ecx, 196(%rsp)
movl %ebx, 200(%rsp)
movq 128(%rsp), %rax # 8-byte Reload
movq %rax, 208(%rsp)
movq %rax, 64(%rsp)
movups 192(%rsp), %xmm0
movups %xmm0, 48(%rsp)
movq 160(%rsp), %rax
movq %rax, 40(%rsp)
movups 144(%rsp), %xmm0
movups %xmm0, 24(%rsp)
movq 184(%rsp), %rax
movq %rax, 16(%rsp)
movups 168(%rsp), %xmm0
movups %xmm0, (%rsp)
callq _Z6MatMul6MatrixS_S_
callq clock
movq %rax, %r15
subq %r13, %r15
movl $_ZSt4cout, %edi
movl $.L.str.9, %esi
movl $11, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movl $-261, %ecx # imm = 0xFEFB
andl _ZSt4cout+24(%rax), %ecx
orl $4, %ecx
xorps %xmm0, %xmm0
cvtsi2ss %r15, %xmm0
movl %ecx, _ZSt4cout+24(%rax)
divss .LCPI2_2(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movl $_ZSt4cout, %edi
movl $.L.str.10, %esi
movl $10, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r13
testq %r13, %r13
je .LBB2_65
# %bb.19: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i80
cmpb $0, 56(%r13)
je .LBB2_21
# %bb.20:
movzbl 67(%r13), %eax
jmp .LBB2_22
.LBB2_21:
movq %r13, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r13), %rax
movq %r13, %rdi
movl $10, %esi
callq *48(%rax)
.LBB2_22: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit83
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq 104(%rsp), %rax # 8-byte Reload
cmpl $10, %eax
movl $10, %r15d
movl $10, %ecx
cmovll %eax, %ecx
movl %ecx, 100(%rsp) # 4-byte Spill
testl %eax, %eax
jle .LBB2_32
# %bb.23: # %.preheader115.lr.ph
cmpl $10, %ebp
cmovll %ebp, %r15d
cmpl $2, %r15d
movl $1, %ecx
cmovll %ecx, %r15d
movl 100(%rsp), %eax # 4-byte Reload
cmpl $2, %eax
cmovgel %eax, %ecx
movq %rcx, 136(%rsp) # 8-byte Spill
xorl %r14d, %r14d
movl $-261, %ebx # imm = 0xFEFB
xorl %r12d, %r12d
jmp .LBB2_24
.p2align 4, 0x90
.LBB2_30: # in Loop: Header=BB2_24 Depth=1
movq %r13, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r13), %rax
movq %r13, %rdi
movl $10, %esi
callq *48(%rax)
.LBB2_31: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit88
# in Loop: Header=BB2_24 Depth=1
movq 80(%rsp), %rbp # 8-byte Reload
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
incq %r12
addl %ebp, %r14d
cmpq 136(%rsp), %r12 # 8-byte Folded Reload
je .LBB2_32
.LBB2_24: # %.preheader115
# =>This Loop Header: Depth=1
# Child Loop BB2_26 Depth 2
testl %ebp, %ebp
jle .LBB2_27
# %bb.25: # %.lr.ph128
# in Loop: Header=BB2_24 Depth=1
movl %r14d, %eax
movq 120(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %r13
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB2_26: # Parent Loop BB2_24 Depth=1
# => This Inner Loop Header: Depth=2
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movl _ZSt4cout+24(%rax), %ecx
andl %ebx, %ecx
orl $4, %ecx
movl %ecx, _ZSt4cout+24(%rax)
movss (%r13,%rbp,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movl $.L.str.11, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
incq %rbp
cmpq %rbp, %r15
jne .LBB2_26
.LBB2_27: # %._crit_edge129
# in Loop: Header=BB2_24 Depth=1
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r13
testq %r13, %r13
je .LBB2_65
# %bb.28: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i85
# in Loop: Header=BB2_24 Depth=1
cmpb $0, 56(%r13)
je .LBB2_30
# %bb.29: # in Loop: Header=BB2_24 Depth=1
movzbl 67(%r13), %eax
jmp .LBB2_31
.LBB2_32: # %._crit_edge132
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r13
testq %r13, %r13
je .LBB2_65
# %bb.33: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i90
cmpb $0, 56(%r13)
je .LBB2_35
# %bb.34:
movzbl 67(%r13), %eax
jmp .LBB2_36
.LBB2_35:
movq %r13, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r13), %rax
movq %r13, %rdi
movl $10, %esi
callq *48(%rax)
.LBB2_36: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit93
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
testl %ebp, %ebp
movq 88(%rsp), %rcx # 8-byte Reload
jle .LBB2_46
# %bb.37: # %.preheader114.lr.ph
cmpl $10, %ebp
movl $10, %r15d
cmovgel %r15d, %ebp
cmpl $10, %ecx
cmovll %ecx, %r15d
cmpl $2, %r15d
movl $1, %eax
cmovll %eax, %r15d
cmpl $2, %ebp
cmovll %eax, %ebp
xorl %ebx, %ebx
movl $-261, %r14d # imm = 0xFEFB
xorl %r12d, %r12d
movq %rbp, 80(%rsp) # 8-byte Spill
jmp .LBB2_38
.p2align 4, 0x90
.LBB2_43: # in Loop: Header=BB2_38 Depth=1
movzbl 67(%r13), %eax
.LBB2_45: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit98
# in Loop: Header=BB2_38 Depth=1
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
incq %r12
movq 88(%rsp), %rcx # 8-byte Reload
addl %ecx, %ebx
cmpq %rbp, %r12
je .LBB2_46
.LBB2_38: # %.preheader114
# =>This Loop Header: Depth=1
# Child Loop BB2_40 Depth 2
testl %ecx, %ecx
jle .LBB2_41
# %bb.39: # %.lr.ph135
# in Loop: Header=BB2_38 Depth=1
movl %ebx, %eax
movq 112(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %r13
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB2_40: # Parent Loop BB2_38 Depth=1
# => This Inner Loop Header: Depth=2
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movl _ZSt4cout+24(%rax), %ecx
andl %r14d, %ecx
orl $4, %ecx
movl %ecx, _ZSt4cout+24(%rax)
movss (%r13,%rbp,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movl $.L.str.11, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
incq %rbp
cmpq %rbp, %r15
jne .LBB2_40
.LBB2_41: # %._crit_edge136
# in Loop: Header=BB2_38 Depth=1
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r13
testq %r13, %r13
je .LBB2_65
# %bb.42: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i95
# in Loop: Header=BB2_38 Depth=1
cmpb $0, 56(%r13)
movq 80(%rsp), %rbp # 8-byte Reload
jne .LBB2_43
# %bb.44: # in Loop: Header=BB2_38 Depth=1
movq %r13, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r13), %rax
movq %r13, %rdi
movl $10, %esi
callq *48(%rax)
jmp .LBB2_45
.LBB2_46: # %._crit_edge139
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r13
testq %r13, %r13
je .LBB2_65
# %bb.47: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i100
cmpb $0, 56(%r13)
je .LBB2_49
# %bb.48:
movzbl 67(%r13), %eax
jmp .LBB2_50
.LBB2_49:
movq %r13, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r13), %rax
movq %r13, %rdi
movl $10, %esi
callq *48(%rax)
.LBB2_50: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit103
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
cmpl $0, 104(%rsp) # 4-byte Folded Reload
movq 88(%rsp), %rax # 8-byte Reload
jle .LBB2_60
# %bb.51: # %.preheader.lr.ph
cmpl $10, %eax
movl $10, %r15d
cmovll %eax, %r15d
cmpl $2, %r15d
movl $1, %edx
cmovll %edx, %r15d
movl 100(%rsp), %ecx # 4-byte Reload
cmpl $2, %ecx
cmovgel %ecx, %edx
movq %rdx, 80(%rsp) # 8-byte Spill
xorl %r14d, %r14d
movl $-261, %ebp # imm = 0xFEFB
xorl %r12d, %r12d
jmp .LBB2_52
.p2align 4, 0x90
.LBB2_58: # in Loop: Header=BB2_52 Depth=1
movq %r13, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r13), %rax
movq %r13, %rdi
movl $10, %esi
callq *48(%rax)
.LBB2_59: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit108
# in Loop: Header=BB2_52 Depth=1
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
incq %r12
movq 88(%rsp), %rax # 8-byte Reload
addl %eax, %r14d
cmpq 80(%rsp), %r12 # 8-byte Folded Reload
je .LBB2_60
.LBB2_52: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB2_54 Depth 2
testl %eax, %eax
jle .LBB2_55
# %bb.53: # %.lr.ph142
# in Loop: Header=BB2_52 Depth=1
movl %r14d, %eax
movq 128(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %r13
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB2_54: # Parent Loop BB2_52 Depth=1
# => This Inner Loop Header: Depth=2
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movl _ZSt4cout+24(%rax), %ecx
andl %ebp, %ecx
orl $4, %ecx
movl %ecx, _ZSt4cout+24(%rax)
movss (%r13,%rbx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movl $.L.str.11, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
incq %rbx
cmpq %rbx, %r15
jne .LBB2_54
.LBB2_55: # %._crit_edge143
# in Loop: Header=BB2_52 Depth=1
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r13
testq %r13, %r13
je .LBB2_65
# %bb.56: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i105
# in Loop: Header=BB2_52 Depth=1
cmpb $0, 56(%r13)
je .LBB2_58
# %bb.57: # in Loop: Header=BB2_52 Depth=1
movzbl 67(%r13), %eax
jmp .LBB2_59
.LBB2_60: # %._crit_edge146
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r12
testq %r12, %r12
je .LBB2_65
# %bb.61: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i110
cmpb $0, 56(%r12)
je .LBB2_63
# %bb.62:
movzbl 67(%r12), %eax
jmp .LBB2_64
.LBB2_63:
movq %r12, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r12), %rax
movq %r12, %rdi
movl $10, %esi
callq *48(%rax)
.LBB2_64: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit113
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq 120(%rsp), %rdi # 8-byte Reload
callq _ZdaPv
movq 112(%rsp), %rdi # 8-byte Reload
callq _ZdaPv
movq 128(%rsp), %rdi # 8-byte Reload
callq _ZdaPv
xorl %eax, %eax
addq $216, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB2_65:
.cfi_def_cfa_offset 272
callq _ZSt16__throw_bad_castv
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12MatMulKernel6MatrixS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "CUDA malloc A: "
.size .L.str, 16
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Copy A to device: "
.size .L.str.1, 19
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "\n"
.size .L.str.2, 2
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "CUDA malloc B: "
.size .L.str.3, 16
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Copy B to device: "
.size .L.str.4, 19
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "CUDA malloc C: "
.size .L.str.5, 16
.type _Z12MatMulKernel6MatrixS_S_,@object # @_Z12MatMulKernel6MatrixS_S_
.section .rodata,"a",@progbits
.globl _Z12MatMulKernel6MatrixS_S_
.p2align 3, 0x0
_Z12MatMulKernel6MatrixS_S_:
.quad _Z27__device_stub__MatMulKernel6MatrixS_S_
.size _Z12MatMulKernel6MatrixS_S_, 8
.type .L.str.6,@object # @.str.6
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.6:
.asciz "Run kernel: "
.size .L.str.6, 13
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "Copy C off of device: "
.size .L.str.7, 23
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "Usage: ./accuracy.o A.height A.width B.width"
.size .L.str.8, 45
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "It took me "
.size .L.str.9, 12
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz " seconds.\n"
.size .L.str.10, 11
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "\t"
.size .L.str.11, 2
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z12MatMulKernel6MatrixS_S_"
.size .L__unnamed_1, 28
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z27__device_stub__MatMulKernel6MatrixS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _ZSt4cout
.addrsig_sym _Z12MatMulKernel6MatrixS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z12MatMulKernel6MatrixS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R9, SR_CTAID.X ; /* 0x0000000000097919 */
/* 0x000e220000002500 */
/*0020*/ MOV R2, c[0x0][0x160] ; /* 0x0000580000027a02 */
/* 0x000fe20000000f00 */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe20000000a00 */
/*0040*/ HFMA2.MMA R21, -RZ, RZ, 0, 0 ; /* 0x00000000ff157435 */
/* 0x000fe200000001ff */
/*0050*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e220000002100 */
/*0060*/ ISETP.GE.AND P1, PT, R2, -0x12, PT ; /* 0xffffffee0200780c */
/* 0x000fc60003f26270 */
/*0070*/ S2R R19, SR_CTAID.Y ; /* 0x0000000000137919 */
/* 0x000e680000002600 */
/*0080*/ S2R R3, SR_TID.Y ; /* 0x0000000000037919 */
/* 0x000e620000002200 */
/*0090*/ IMAD R18, R9, 0x14, R0 ; /* 0x0000001409127824 */
/* 0x001fca00078e0200 */
/*00a0*/ ISETP.GE.AND P0, PT, R18, c[0x0][0x190], PT ; /* 0x0000640012007a0c */
/* 0x000fe20003f06270 */
/*00b0*/ IMAD R19, R19, 0x14, R3 ; /* 0x0000001413137824 */
/* 0x002fca00078e0203 */
/*00c0*/ ISETP.GE.OR P0, PT, R19, c[0x0][0x194], P0 ; /* 0x0000650013007a0c */
/* 0x000fe20000706670 */
/*00d0*/ @!P1 BRA 0x600 ; /* 0x0000052000009947 */
/* 0x000fd80003800000 */
/*00e0*/ IADD3 R2, R2, -0x1, RZ ; /* 0xffffffff02027810 */
/* 0x000fe20007ffe0ff */
/*00f0*/ IMAD R6, R19, c[0x0][0x160], R0 ; /* 0x0000580013067a24 */
/* 0x000fe200078e0200 */
/*0100*/ MOV R7, 0x4 ; /* 0x0000000400077802 */
/* 0x000fe20000000f00 */
/*0110*/ UMOV UR4, 0xffffffff ; /* 0xffffffff00047882 */
/* 0x000fe20000000000 */
/*0120*/ MOV R21, RZ ; /* 0x000000ff00157202 */
/* 0x000fe20000000f00 */
/*0130*/ IMAD.HI R4, R2, 0x66666667, RZ ; /* 0x6666666702047827 */
/* 0x000fe200078e02ff */
/*0140*/ LEA R16, R0, 0x640, 0x2 ; /* 0x0000064000107811 */
/* 0x000fc600078e10ff */
/*0150*/ IMAD R2, R3, c[0x0][0x178], R0 ; /* 0x00005e0003027a24 */
/* 0x000fe200078e0200 */
/*0160*/ SHF.R.U32.HI R5, RZ, 0x1f, R4 ; /* 0x0000001fff057819 */
/* 0x000fe20000011604 */
/*0170*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc600078e0207 */
/*0180*/ LEA.HI.SX32 R4, R4, R5, 0x1d ; /* 0x0000000504047211 */
/* 0x000fe200078feaff */
/*0190*/ IMAD R5, R3, 0x50, RZ ; /* 0x0000005003057824 */
/* 0x000fe400078e02ff */
/*01a0*/ IMAD R2, R9, 0x14, R2 ; /* 0x0000001409027824 */
/* 0x000fc600078e0202 */
/*01b0*/ LEA R17, R0, R5, 0x2 ; /* 0x0000000500117211 */
/* 0x000fe400078e10ff */
/*01c0*/ ISETP.GE.AND P1, PT, R3, c[0x0][0x17c], PT ; /* 0x00005f0003007a0c */
/* 0x000fe20003f26270 */
/*01d0*/ HFMA2.MMA R26, -RZ, RZ, 0, 0 ; /* 0x00000000ff1a7435 */
/* 0x000fe200000001ff */
/*01e0*/ ISETP.GE.AND P2, PT, R0, c[0x0][0x160], PT ; /* 0x0000580000007a0c */
/* 0x000fe40003f46270 */
/*01f0*/ ISETP.GE.OR P1, PT, R18, c[0x0][0x178], P1 ; /* 0x00005e0012007a0c */
/* 0x000fe40000f26670 */
/*0200*/ ISETP.GE.OR P2, PT, R19, c[0x0][0x164], P2 ; /* 0x0000590013007a0c */
/* 0x000fe40001746670 */
/*0210*/ MOV R28, RZ ; /* 0x000000ff001c7202 */
/* 0x000fd20000000f00 */
/*0220*/ @!P1 MOV R25, 0x4 ; /* 0x0000000400199802 */
/* 0x000fe40000000f00 */
/*0230*/ @!P2 LDG.E R26, [R6.64] ; /* 0x00000006061aa981 */
/* 0x0000a6000c1e1900 */
/*0240*/ @!P1 IMAD.WIDE R24, R2, R25, c[0x0][0x188] ; /* 0x0000620002189625 */
/* 0x000fca00078e0219 */
/*0250*/ @!P1 LDG.E R28, [R24.64] ; /* 0x00000006181c9981 */
/* 0x000ee2000c1e1900 */
/*0260*/ UIADD3 UR4, UR4, 0x1, URZ ; /* 0x0000000104047890 */
/* 0x000fcc000fffe03f */
/*0270*/ ISETP.LE.AND P1, PT, R4, UR4, PT ; /* 0x0000000404007c0c */
/* 0x000fe4000bf23270 */
/*0280*/ IADD3 R6, P2, R6, 0x50, RZ ; /* 0x0000005006067810 */
/* 0x001fe40007f5e0ff */
/*0290*/ IADD3 R3, R3, 0x14, RZ ; /* 0x0000001403037810 */
/* 0x000fe40007ffe0ff */
/*02a0*/ IADD3 R0, R0, 0x14, RZ ; /* 0x0000001400007810 */
/* 0x000fe40007ffe0ff */
/*02b0*/ IADD3.X R7, RZ, R7, RZ, P2, !PT ; /* 0x00000007ff077210 */
/* 0x000fe200017fe4ff */
/*02c0*/ STS [R17], R26 ; /* 0x0000001a11007388 */
/* 0x004fe80000000800 */
/*02d0*/ STS [R17+0x640], R28 ; /* 0x0006401c11007388 */
/* 0x008fe80000000800 */
/*02e0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*02f0*/ LDS R29, [R16] ; /* 0x00000000101d7984 */
/* 0x000fe80000000800 */
/*0300*/ LDS.128 R8, [R5] ; /* 0x0000000005087984 */
/* 0x000e280000000c00 */
/*0310*/ LDS R26, [R16+0x50] ; /* 0x00005000101a7984 */
/* 0x000e680000000800 */
/*0320*/ LDS R27, [R16+0xa0] ; /* 0x0000a000101b7984 */
/* 0x000ea80000000800 */
/*0330*/ LDS R20, [R16+0xf0] ; /* 0x0000f00010147984 */
/* 0x000ee80000000800 */
/*0340*/ LDS R23, [R16+0x140] ; /* 0x0001400010177984 */
/* 0x000fe80000000800 */
/*0350*/ LDS.128 R12, [R5+0x10] ; /* 0x00001000050c7984 */
/* 0x000f280000000c00 */
/*0360*/ LDS R22, [R16+0x190] ; /* 0x0001900010167984 */
/* 0x000f680000000800 */
/*0370*/ LDS R25, [R16+0x1e0] ; /* 0x0001e00010197984 */
/* 0x000f680000000800 */
/*0380*/ LDS R24, [R16+0x230] ; /* 0x0002300010187984 */
/* 0x000f620000000800 */
/*0390*/ FFMA R8, R29, R8, R21 ; /* 0x000000081d087223 */
/* 0x001fc60000000015 */
/*03a0*/ LDS R21, [R16+0x280] ; /* 0x0002800010157984 */
/* 0x000fe20000000800 */
/*03b0*/ FFMA R8, R26, R9, R8 ; /* 0x000000091a087223 */
/* 0x002fc80000000008 */
/*03c0*/ FFMA R8, R27, R10, R8 ; /* 0x0000000a1b087223 */
/* 0x004fc80000000008 */
/*03d0*/ FFMA R26, R20, R11, R8 ; /* 0x0000000b141a7223 */
/* 0x008fe40000000008 */
/*03e0*/ LDS.128 R8, [R5+0x20] ; /* 0x0000200005087984 */
/* 0x000e280000000c00 */
/*03f0*/ LDS R20, [R16+0x2d0] ; /* 0x0002d00010147984 */
/* 0x000e620000000800 */
/*0400*/ FFMA R12, R23, R12, R26 ; /* 0x0000000c170c7223 */
/* 0x010fc6000000001a */
/*0410*/ LDS R23, [R16+0x320] ; /* 0x0003200010177984 */
/* 0x000ea20000000800 */
/*0420*/ FFMA R12, R22, R13, R12 ; /* 0x0000000d160c7223 */
/* 0x020fc6000000000c */
/*0430*/ LDS R22, [R16+0x370] ; /* 0x0003700010167984 */
/* 0x000ee20000000800 */
/*0440*/ FFMA R12, R25, R14, R12 ; /* 0x0000000e190c7223 */
/* 0x000fc6000000000c */
/*0450*/ LDS R25, [R16+0x3c0] ; /* 0x0003c00010197984 */
/* 0x000fe20000000800 */
/*0460*/ FFMA R24, R24, R15, R12 ; /* 0x0000000f18187223 */
/* 0x000fc6000000000c */
/*0470*/ LDS.128 R12, [R5+0x30] ; /* 0x00003000050c7984 */
/* 0x000f220000000c00 */
/*0480*/ FFMA R8, R21, R8, R24 ; /* 0x0000000815087223 */
/* 0x001fc60000000018 */
/*0490*/ LDS R24, [R16+0x410] ; /* 0x0004100010187984 */
/* 0x000e220000000800 */
/*04a0*/ FFMA R8, R20, R9, R8 ; /* 0x0000000914087223 */
/* 0x002fc60000000008 */
/*04b0*/ LDS R21, [R16+0x460] ; /* 0x0004600010157984 */
/* 0x000e620000000800 */
/*04c0*/ FFMA R8, R23, R10, R8 ; /* 0x0000000a17087223 */
/* 0x004fc60000000008 */
/*04d0*/ LDS R20, [R16+0x4b0] ; /* 0x0004b00010147984 */
/* 0x000ea20000000800 */
/*04e0*/ FFMA R26, R22, R11, R8 ; /* 0x0000000b161a7223 */
/* 0x008fc60000000008 */
/*04f0*/ LDS R23, [R16+0x500] ; /* 0x0005000010177984 */
/* 0x000fe80000000800 */
/*0500*/ LDS.128 R8, [R5+0x40] ; /* 0x0000400005087984 */
/* 0x000ee80000000c00 */
/*0510*/ LDS R22, [R16+0x550] ; /* 0x0005500010167984 */
/* 0x000f620000000800 */
/*0520*/ FFMA R26, R25, R12, R26 ; /* 0x0000000c191a7223 */
/* 0x010fc6000000001a */
/*0530*/ LDS R25, [R16+0x5a0] ; /* 0x0005a00010197984 */
/* 0x000f280000000800 */
/*0540*/ LDS R12, [R16+0x5f0] ; /* 0x0005f000100c7984 */
/* 0x000f220000000800 */
/*0550*/ FFMA R13, R24, R13, R26 ; /* 0x0000000d180d7223 */
/* 0x001fc8000000001a */
/*0560*/ FFMA R13, R21, R14, R13 ; /* 0x0000000e150d7223 */
/* 0x002fc8000000000d */
/*0570*/ FFMA R13, R20, R15, R13 ; /* 0x0000000f140d7223 */
/* 0x004fc8000000000d */
/*0580*/ FFMA R8, R23, R8, R13 ; /* 0x0000000817087223 */
/* 0x008fc8000000000d */
/*0590*/ FFMA R8, R22, R9, R8 ; /* 0x0000000916087223 */
/* 0x020fe20000000008 */
/*05a0*/ HFMA2.MMA R9, -RZ, RZ, 0, 1.1920928955078125e-06 ; /* 0x00000014ff097435 */
/* 0x000fc600000001ff */
/*05b0*/ FFMA R8, R25, R10, R8 ; /* 0x0000000a19087223 */
/* 0x010fc80000000008 */
/*05c0*/ FFMA R21, R12, R11, R8 ; /* 0x0000000b0c157223 */
/* 0x000fc60000000008 */
/*05d0*/ IMAD R2, R9, c[0x0][0x178], R2 ; /* 0x00005e0009027a24 */
/* 0x000fe200078e0202 */
/*05e0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*05f0*/ @!P1 BRA 0x1c0 ; /* 0xfffffbc000009947 */
/* 0x000fea000383ffff */
/*0600*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0610*/ MOV R3, 0x4 ; /* 0x0000000400037802 */
/* 0x000fe20000000f00 */
/*0620*/ IMAD R2, R19, c[0x0][0x190], R18 ; /* 0x0000640013027a24 */
/* 0x000fc800078e0212 */
/*0630*/ IMAD.WIDE R2, R2, R3, c[0x0][0x1a0] ; /* 0x0000680002027625 */
/* 0x000fca00078e0203 */
/*0640*/ STG.E [R2.64], R21 ; /* 0x0000001502007986 */
/* 0x000fe2000c101906 */
/*0650*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0660*/ BRA 0x660; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0670*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0680*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0690*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z12MatMulKernel6MatrixS_S_
.globl _Z12MatMulKernel6MatrixS_S_
.p2align 8
.type _Z12MatMulKernel6MatrixS_S_,@function
_Z12MatMulKernel6MatrixS_S_:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x0
s_load_b64 s[8:9], s[0:1], 0x40
v_bfe_u32 v3, v0, 10, 10
v_and_b32_e32 v4, 0x3ff, v0
s_delay_alu instid0(VALU_DEP_1)
v_mad_u64_u32 v[0:1], null, s15, 20, v[3:4]
v_mad_u64_u32 v[1:2], null, s14, 20, v[4:5]
v_mov_b32_e32 v2, 0
s_waitcnt lgkmcnt(0)
s_cmpk_lt_i32 s3, 0xffee
s_cbranch_scc1 .LBB0_14
s_clause 0x2
s_load_b32 s2, s[0:1], 0x4
s_load_b128 s[4:7], s[0:1], 0x10
s_load_b64 s[10:11], s[0:1], 0x28
v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v9, 2, v4
s_add_i32 s13, s3, -1
v_mov_b32_e32 v7, 0
v_mul_lo_u32 v6, s3, v0
s_delay_alu instid0(VALU_DEP_3)
v_add_nc_u32_e32 v8, 0x640, v9
s_mul_hi_i32 s13, s13, 0x66666667
v_mul_u32_u24_e32 v5, 0x50, v3
s_lshr_b32 s14, s13, 31
s_ashr_i32 s13, s13, 3
v_mad_u32_u24 v9, v3, 0x50, v9
v_mad_u32_u24 v10, v3, 0x50, v8
s_add_i32 s13, s13, s14
s_mov_b32 s12, 0
s_max_i32 s13, s13, 0
s_waitcnt lgkmcnt(0)
v_cmp_gt_i32_e32 vcc_lo, s2, v0
v_cmp_gt_i32_e64 s2, s6, v1
s_xor_b32 s14, vcc_lo, -1
s_delay_alu instid0(VALU_DEP_1)
s_xor_b32 s2, s2, -1
.LBB0_2:
s_mul_i32 s15, s12, 20
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v11, s15, v4
v_cmp_le_i32_e32 vcc_lo, s3, v11
s_or_b32 s16, s14, vcc_lo
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_saveexec_b32 s17, s16
s_xor_b32 s16, exec_lo, s17
s_cbranch_execz .LBB0_4
ds_store_b32 v9, v7
.LBB0_4:
s_and_not1_saveexec_b32 s16, s16
s_cbranch_execz .LBB0_6
v_add_nc_u32_e32 v11, v11, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v12, 31, v11
v_lshlrev_b64 v[11:12], 2, v[11:12]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v11, vcc_lo, s4, v11
v_add_co_ci_u32_e32 v12, vcc_lo, s5, v12, vcc_lo
global_load_b32 v11, v[11:12], off
s_waitcnt vmcnt(0)
ds_store_b32 v9, v11
.LBB0_6:
s_or_b32 exec_lo, exec_lo, s16
v_add_nc_u32_e32 v11, s15, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_cmp_le_i32_e32 vcc_lo, s7, v11
s_or_b32 s15, s2, vcc_lo
s_and_saveexec_b32 s16, s15
s_delay_alu instid0(SALU_CYCLE_1)
s_xor_b32 s15, exec_lo, s16
s_cbranch_execz .LBB0_8
ds_store_b32 v10, v7
.LBB0_8:
s_and_not1_saveexec_b32 s15, s15
s_cbranch_execz .LBB0_10
v_mad_u64_u32 v[12:13], null, v11, s6, v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v13, 31, v12
v_lshlrev_b64 v[11:12], 2, v[12:13]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v11, vcc_lo, s10, v11
v_add_co_ci_u32_e32 v12, vcc_lo, s11, v12, vcc_lo
global_load_b32 v11, v[11:12], off
s_waitcnt vmcnt(0)
ds_store_b32 v10, v11
.LBB0_10:
s_or_b32 exec_lo, exec_lo, s15
v_mov_b32_e32 v11, v8
s_mov_b32 s15, 0
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
.LBB0_11:
v_add_nc_u32_e32 v12, s15, v5
s_add_i32 s15, s15, 4
ds_load_b32 v13, v11
ds_load_b32 v12, v12
v_add_nc_u32_e32 v11, 0x50, v11
s_cmpk_eq_i32 s15, 0x50
s_waitcnt lgkmcnt(0)
v_fmac_f32_e32 v2, v12, v13
s_cbranch_scc0 .LBB0_11
s_add_i32 s15, s12, 1
s_cmp_eq_u32 s12, s13
s_barrier
buffer_gl0_inv
s_cbranch_scc1 .LBB0_14
s_mov_b32 s12, s15
s_branch .LBB0_2
.LBB0_14:
s_load_b64 s[2:3], s[0:1], 0x30
s_waitcnt lgkmcnt(0)
v_cmp_gt_i32_e32 vcc_lo, s3, v0
v_cmp_gt_i32_e64 s0, s2, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s0, vcc_lo, s0
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_16
v_mad_u64_u32 v[3:4], null, s2, v0, v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v4, 31, v3
v_lshlrev_b64 v[0:1], 2, v[3:4]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s8, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s9, v1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB0_16:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z12MatMulKernel6MatrixS_S_
.amdhsa_group_segment_fixed_size 3200
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 72
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 14
.amdhsa_next_free_sgpr 18
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z12MatMulKernel6MatrixS_S_, .Lfunc_end0-_Z12MatMulKernel6MatrixS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 24
.value_kind: by_value
- .offset: 24
.size: 24
.value_kind: by_value
- .offset: 48
.size: 24
.value_kind: by_value
.group_segment_fixed_size: 3200
.kernarg_segment_align: 8
.kernarg_segment_size: 72
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z12MatMulKernel6MatrixS_S_
.private_segment_fixed_size: 0
.sgpr_count: 20
.sgpr_spill_count: 0
.symbol: _Z12MatMulKernel6MatrixS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 14
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <assert.h>
#include <math.h>
#include <vector>
#include <queue>
#include <ctime>
// CUDA runtime
#include <cuda_runtime.h>
using namespace std;
// Thread block size
#define TBS 512
// Warp size
#define WS 32
class Node {
private:
int value;
int* children;
int numChildren;
int explored;
public:
Node();
Node(int);
__host__ __device__ int getValue();
void addChild(Node*);
__host__ __device__ int* getChildren();
__host__ __device__ int getNumChildren();
void printNode();
void initializeChildren(int);
__host__ __device__ int getExplored();
void setExplored(int);
__device__ int parallelSetExplored(int);
};
__global__ void parentListBackwardsWave(int *d_waveMask, int *d_nextWaveMask, int *d_parent, int *d_parentPtr, int *d_cost, int *d_size) {
int idx = blockIdx.x * TBS + threadIdx.x;
if (idx < *d_size && d_waveMask[idx] == 0) {
// Loop through all children
for (int i = d_parentPtr[idx]; i < d_parentPtr[idx + 1]; i++) {
if (d_waveMask[d_parent[i]] == 1) {
atomicCAS(&d_nextWaveMask[idx], 0, 1);
d_cost[idx] = d_cost[d_parent[i]] + 1;
break;
}
}
}
if(idx < *d_size && d_waveMask[idx] == 2){
d_nextWaveMask[idx] = 2;
}
}
__global__ void backwardsWave(int *d_waveMask, int *d_nextWaveMask, int *d_children, int *d_numChildren, int *d_cost, int *d_size, int *d_maxChildren) {
int idx = blockIdx.x * TBS + threadIdx.x;
if (idx < *d_size && d_waveMask[idx] == 0) {
// Loop through all children
for (int i = 0; i < *d_size * *d_maxChildren; i++) {
if (d_children[i] == idx) {
int parent = i / *d_maxChildren;
if (d_waveMask[parent] == 1) {
atomicCAS(&d_nextWaveMask[idx], 0, 1);
d_cost[idx] = d_cost[parent] + 1;
break;
}
}
}
}
if(idx < *d_size && d_waveMask[idx] == 2){
d_nextWaveMask[idx] = 2;
}
}
__global__ void childListExploreWave(int *d_waveMask, int *d_nextWaveMask, int *d_children, int *d_numChildren, int *d_cost, int *d_size, int *d_maxChildren) {
int idx = blockIdx.x * TBS + threadIdx.x;
if (idx < *d_size && d_waveMask[idx] == 1) {
int numChildren = d_numChildren[idx];
for (int i = 0; i < numChildren; i++) {
int child = d_children[idx * *d_maxChildren + i];
atomicCAS(&d_nextWaveMask[child],0,1);
if (d_waveMask[child] == 0) {
d_cost[child] = d_cost[idx] + 1;
}
}
}
if(idx < *d_size && d_waveMask[idx] == 2){
d_nextWaveMask[idx] = 2;
}
}
__global__ void exploreWave(int *d_waveMask, int *d_nextWaveMask, Node *d_graph, int *d_children, int *d_cost, int *d_size, int *d_maxChildren) {
int idx = blockIdx.x * TBS + threadIdx.x;
if (idx < *d_size && d_waveMask[idx] == 1) {
Node currentNode = d_graph[idx];
int numChildren = currentNode.getNumChildren();
for (int i = 0; i < numChildren; i++) {
int child = d_children[idx * *d_maxChildren + i];
atomicCAS(&d_nextWaveMask[child],0,1);
if (d_waveMask[child] == 0) {
d_cost[child] = d_cost[idx] + 1;
}
}
}
if(idx < *d_size && d_waveMask[idx] == 2){
d_nextWaveMask[idx] = 2;
}
}
__global__ void setPreviousExplored(int *d_waveMask, int *d_nextWaveMask, int *d_size){
int idx = blockIdx.x * TBS + threadIdx.x;
if(idx < *d_size){
if(d_waveMask[idx] == 1){
d_nextWaveMask[idx] = 2;
}
}
}
int* generateChildren(Node *nodes, int nNodes, int maxEdgesPerNode) {
int* children = new int[nNodes * maxEdgesPerNode];
for (int i = 0; i < nNodes; i++) {
int numEdges = (rand() % maxEdgesPerNode) + 1;
nodes[i].initializeChildren(numEdges);
for (int j = 0; j < numEdges; j++) {
int child = rand() % nNodes;
bool isChild = false;
for (int k = 0; k < nodes[i].getNumChildren(); k++){
if (child == nodes[i].getChildren()[k]){
isChild = true;
break;
}
}
if (!isChild && child != nodes[i].getValue()){
children[i * maxEdgesPerNode + nodes[i].getNumChildren()] = child;
nodes[i].addChild(&nodes[child]);
}
}
}
/*for (int i = 0; i < nNodes; i++) {
nodes[i].printNode();
}*/
return children;
}
Node* generateGraph(int nNodes) {
srand((unsigned)time(0));
Node* nodes = new Node[nNodes];
for (int i = 0; i < nNodes; i++) {
Node* tmp = new Node(i);
nodes[i] = *tmp;
}
return nodes;
}
void exploreChild(Node* child, vector< vector<Node*> >* path, int depth, Node* nodes) {
int numChildren = child->getNumChildren();
if (numChildren > 0) {
bool *toExplore = new bool[numChildren];
vector<Node*> newPath;
if (path->size() <= depth) {
path->push_back(newPath);
}
vector<Node*>* currentPath = &(path->at(depth));
for (int i = 0; i < numChildren; i++) {
Node* newChild = &nodes[child->getChildren()[i]];
if (newChild->getExplored() == 0) {
currentPath->push_back(newChild);
newChild->setExplored(1);
toExplore[i] = true;
} else {
toExplore[i] = false;
}
}
// Explore loop after push loop so it is actually BFS
for (int i = 0; i < numChildren; i++) {
Node* newChild = &nodes[child->getChildren()[i]];
if (toExplore[i]) {
exploreChild(newChild, path, depth + 1, nodes);
}
}
}
child->setExplored(2);
return;
}
int* bfs(Node* nodes, int size) {
int* cost = new int[size];
for (int i = 0; i < size; i++) {
cost[i] = -1;
}
Node* currentNode = &nodes[0];
queue<Node*> wave;
wave.push(currentNode);
cost[0] = 0;
int depth = 0;
while (!wave.empty()) {
depth = cost[wave.front()->getValue()];
while (!wave.empty() && depth == cost[wave.front()->getValue()]) {
currentNode = wave.front();
wave.pop();
currentNode->setExplored(1);
if (currentNode->getNumChildren() > 0) {
int *children = currentNode->getChildren();
for (int i = 0; i < currentNode->getNumChildren(); i++) {
if (nodes[children[i]].getExplored() == 0) {
nodes[children[i]].setExplored(1);
cost[children[i]] = depth + 1;
wave.push(&nodes[children[i]]);
}
}
}
}
}
return cost;
}
int* transformBfs(vector< vector<Node*> > path, int size) {
int *result = new int[size];
for (int i = 0; i < size; i++) {
result[i] = -1;
}
for (int i = 0; i < path.size(); i++) {
//printf("%i - ", i);
for (int j = 0; j < path[i].size(); j++) {
//printf(" %i ", path[i][j]->getValue());
result[path[i][j]->getValue()] = i;
}
//printf("\n");
}
return result;
}
int* transformNumChildren(Node* nodes, int size) {
int *result = new int[size];
for (int i = 0; i < size; i++) {
result[i] = nodes[i].getNumChildren();
}
return result;
}
int* transformParentPtr(Node* nodes, int size) {
int *result = new int[size + 1];
for (int i = 0; i < size; i++) {
result[i] = 0;
}
for (int i = 0; i < size; i++) {
Node *node = &nodes[i];
if (node->getNumChildren() > 0) {
int *children = node->getChildren();
for (int j = 0; j < node->getNumChildren(); j++) {
int child = children[j];
result[child + 1] += 1;
}
}
}
for (int i = 1; i < size + 1; i++) {
result[i] = result[i] + result[i - 1];
}
return result;
}
int* transformParents(Node* nodes, int size, int* parentPtr) {
int numEdges = parentPtr[size];
int *result = new int[numEdges];
int *curIdx = new int[size];
for (int i = 0; i < size; i++) {
curIdx[i] = parentPtr[i];
}
for (int i = 0; i < size; i++) {
Node *node = &nodes[i];
if (node->getNumChildren() > 0) {
int *children = node->getChildren();
for (int j = 0; j < node->getNumChildren(); j++) {
int child = children[j];
result[curIdx[child]] = i;
curIdx[child] = curIdx[child] + 1;
}
}
}
return result;
}
void callFlipFlopParent(int *d_size, int *d_children, int *d_numChildren, int *d_maxChildren, int *d_parent, int *d_parentPtr, int size, int maxChildren, int *synchResult) {
cudaEvent_t start;
cudaEventCreate(&start);
cudaEvent_t stop;
cudaEventCreate(&stop);
int *d_cost, *d_waveMask, *d_nextWaveMask;
// Allocate space for device copies
cudaMalloc((void **)&d_cost, size * sizeof(int));
cudaMalloc((void **)&d_waveMask, size * sizeof(int));
cudaMalloc((void **)&d_nextWaveMask, size * sizeof(int));
int gridSz = ceil(((float) size) / TBS);
int *waveMask = new int[size];
int *nextWaveMask = new int[size];
int *cost = new int[size];
cost[0] = 0;
for (int i = 1; i < size; i++) {
cost[i] = -1;
waveMask[i] = 0;
nextWaveMask[i] = 0;
}
waveMask[0] = 1;
cudaMemcpy(d_cost, cost, size * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_waveMask, waveMask, size * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_nextWaveMask, nextWaveMask, size * sizeof(int), cudaMemcpyHostToDevice);
// Record the start event
cudaEventRecord(start, NULL);
bool complete = false;
int completed = 0;
while(!complete) {
// Launch kernel on GPU
if (completed < (maxChildren * maxChildren - 1) / (maxChildren * maxChildren) * size) {
childListExploreWave<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_children, d_numChildren, d_cost, d_size, d_maxChildren);
} else {
parentListBackwardsWave<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_parent, d_parentPtr, d_cost, d_size);
}
cudaDeviceSynchronize();
setPreviousExplored<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_size);
cudaDeviceSynchronize();
cudaMemcpy(d_waveMask, d_nextWaveMask, size * sizeof(int), cudaMemcpyDeviceToDevice);
cudaMemcpy(d_nextWaveMask, nextWaveMask, size * sizeof(int), cudaMemcpyHostToDevice);
complete = true;
cudaMemcpy(waveMask, d_waveMask, size * sizeof(int), cudaMemcpyDeviceToHost);
for(int i = 0 ; i < size; i++) {
if(waveMask[i] == 1) {
complete = false;
} else if (waveMask[i] == 2) {
completed += 1;
}
}
}
// Make sure result is finished
cudaDeviceSynchronize();
// Record end event
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
float msecTotal = 0.0f;
cudaEventElapsedTime(&msecTotal, start, stop);
printf("GPU Parent Flip Flop Explore Time= %.3f msec\n", msecTotal);
// Copy result back to host
int *gpu_result = (int *) malloc(size * sizeof(int));
cudaMemcpy(gpu_result, d_cost, size * sizeof(int), cudaMemcpyDeviceToHost);
bool isCorrect = true;
for (int i = 0; i < size; i++) {
if (synchResult[i] != gpu_result[i]) {
isCorrect = false;
printf("%i CPU: %i GPU:%i\n", i, synchResult[i], gpu_result[i]);
}
}
if (!isCorrect) {
printf("The results do not match\n");
} else {
printf("The results match\n");
}
}
// Times and runs the "flip flop" GPU BFS: forward frontier expansion
// (childListExploreWave) while few nodes are finished, then the backward
// gather kernel (backwardsWave) once most nodes are done. Verifies the device
// cost array against the CPU BFS result in synchResult.
// Mask values: 0 = unvisited, 1 = on current frontier, 2 = finished.
// NOTE(review): nextWaveMask[0] is never initialized (the init loop starts at
// i = 1) before being copied to the device -- it presumably should be 0.
// NOTE(review): the switch threshold below uses integer division, so
// (m*m - 1) / (m*m) truncates to 0 and the forward branch is never taken; a
// floating-point expression was probably intended. TODO confirm.
// NOTE(review): the host arrays, device buffers, events, and gpu_result are
// never released.
void callFlipFlopWaveExplore(int *d_size, int *d_children, int *d_numChildren, int size, int *d_maxChildren, int maxChildren, int *synchResult) {
    // Events bracket the whole wave loop for timing.
    cudaEvent_t start;
    cudaEventCreate(&start);
    cudaEvent_t stop;
    cudaEventCreate(&stop);
    int *d_cost, *d_waveMask, *d_nextWaveMask;
    // Allocate space for device copies
    cudaMalloc((void **)&d_cost, size * sizeof(int));
    cudaMalloc((void **)&d_waveMask, size * sizeof(int));
    cudaMalloc((void **)&d_nextWaveMask, size * sizeof(int));
    // One thread per node, TBS threads per block (ceil division).
    int gridSz = ceil(((float) size) / TBS);
    int *waveMask = new int[size];
    int *nextWaveMask = new int[size];
    int *cost = new int[size];
    // Node 0 is the BFS source: cost 0, everything else unreached (-1).
    cost[0] = 0;
    for (int i = 1; i < size; i++) {
        cost[i] = -1;
        waveMask[i] = 0;
        nextWaveMask[i] = 0;
    }
    waveMask[0] = 1;
    cudaMemcpy(d_cost, cost, size * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_waveMask, waveMask, size * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_nextWaveMask, nextWaveMask, size * sizeof(int), cudaMemcpyHostToDevice);
    // Record the start event
    cudaEventRecord(start, NULL);
    bool complete = false;
    int completed = 0;
    while(!complete) {
        // Launch kernel on GPU: forward scatter early, backward gather late.
        if (completed < (maxChildren * maxChildren - 1) / (maxChildren * maxChildren) * size) {
            childListExploreWave<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_children, d_numChildren, d_cost, d_size, d_maxChildren);
        } else {
            backwardsWave<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_children, d_numChildren, d_cost, d_size, d_maxChildren);
        }
        cudaDeviceSynchronize();
        // Retire the current frontier (mask 1 -> 2) into the next mask.
        setPreviousExplored<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_size);
        cudaDeviceSynchronize();
        // Promote next mask to current, then reset the device-side next mask.
        cudaMemcpy(d_waveMask, d_nextWaveMask, size * sizeof(int), cudaMemcpyDeviceToDevice);
        cudaMemcpy(d_nextWaveMask, nextWaveMask, size * sizeof(int), cudaMemcpyHostToDevice);
        complete = true;
        cudaMemcpy(waveMask, d_waveMask, size * sizeof(int), cudaMemcpyDeviceToHost);
        // Keep looping while any node is still on the frontier; count finished
        // nodes. NOTE(review): `completed` is not reset per iteration, so
        // finished nodes are re-counted on every pass -- verify intent.
        for(int i = 0 ; i < size; i++) {
            if(waveMask[i] == 1) {
                complete = false;
            } else if (waveMask[i] == 2) {
                completed += 1;
            }
        }
    }
    // Make sure result is finished
    cudaDeviceSynchronize();
    // Record end event
    cudaEventRecord(stop, NULL);
    cudaEventSynchronize(stop);
    float msecTotal = 0.0f;
    cudaEventElapsedTime(&msecTotal, start, stop);
    printf("GPU Flip Flop Explore Time= %.3f msec\n", msecTotal);
    // Copy result back to host
    int *gpu_result = (int *) malloc(size * sizeof(int));
    cudaMemcpy(gpu_result, d_cost, size * sizeof(int), cudaMemcpyDeviceToHost);
    // Element-wise check against the CPU reference.
    bool isCorrect = true;
    for (int i = 0; i < size; i++) {
        if (synchResult[i] != gpu_result[i]) {
            isCorrect = false;
            printf("%i CPU: %i GPU:%i\n", i, synchResult[i], gpu_result[i]);
        }
    }
    if (!isCorrect) {
        printf("The results do not match\n");
    } else {
        printf("The results match\n");
    }
}
// Times and runs the forward-only GPU BFS over the flattened child list, then
// verifies the device cost array against the CPU result in synchResult.
// Mask values: 0 = unvisited, 1 = on current frontier, 2 = finished.
// NOTE(review): nextWaveMask[0] is never initialized before the copy to the
// device (the init loop starts at i = 1) -- it presumably should be 0.
// NOTE(review): host arrays, device buffers, events, and gpu_result leak.
void callChildListExploreWave(int *d_size, int *d_children, int *d_numChildren, int size, int *d_maxChildren, int *synchResult) {
    cudaEvent_t start;
    cudaEventCreate(&start);
    cudaEvent_t stop;
    cudaEventCreate(&stop);
    int *d_cost, *d_waveMask, *d_nextWaveMask;
    // Allocate space for device copies
    cudaMalloc((void **)&d_cost, size * sizeof(int));
    cudaMalloc((void **)&d_waveMask, size * sizeof(int));
    cudaMalloc((void **)&d_nextWaveMask, size * sizeof(int));
    // One thread per node (ceil division).
    int gridSz = ceil(((float) size) / TBS);
    int *waveMask = new int[size];
    int *nextWaveMask = new int[size];
    int *cost = new int[size];
    // Node 0 is the source: cost 0 and on the initial frontier.
    cost[0] = 0;
    for (int i = 1; i < size; i++) {
        cost[i] = -1;
        waveMask[i] = 0;
        nextWaveMask[i] = 0;
    }
    waveMask[0] = 1;
    cudaMemcpy(d_cost, cost, size * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_waveMask, waveMask, size * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_nextWaveMask, nextWaveMask, size * sizeof(int), cudaMemcpyHostToDevice);
    // Record the start event
    cudaEventRecord(start, NULL);
    bool complete = false;
    while(!complete) {
        // Expand the frontier, then retire it (mask 1 -> 2) into the next mask.
        childListExploreWave<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_children, d_numChildren, d_cost, d_size, d_maxChildren);
        cudaDeviceSynchronize();
        setPreviousExplored<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_size);
        cudaDeviceSynchronize();
        // Promote next mask to current and reset the device-side next mask.
        cudaMemcpy(d_waveMask, d_nextWaveMask, size * sizeof(int), cudaMemcpyDeviceToDevice);
        cudaMemcpy(d_nextWaveMask, nextWaveMask, size * sizeof(int), cudaMemcpyHostToDevice);
        // Done once no node remains on the frontier.
        complete = true;
        cudaMemcpy(waveMask, d_waveMask, size * sizeof(int), cudaMemcpyDeviceToHost);
        for(int i = 0 ; i < size; i++){
            if(waveMask[i] == 1){
                complete = false;
            }
        }
    }
    // Make sure result is finished
    cudaDeviceSynchronize();
    // Record end event
    cudaEventRecord(stop, NULL);
    cudaEventSynchronize(stop);
    float msecTotal = 0.0f;
    cudaEventElapsedTime(&msecTotal, start, stop);
    printf("GPU Child List Explore Time= %.3f msec\n", msecTotal);
    // Copy result back to host and compare with the CPU reference.
    int *gpu_result = (int *) malloc(size * sizeof(int));
    cudaMemcpy(gpu_result, d_cost, size * sizeof(int), cudaMemcpyDeviceToHost);
    bool isCorrect = true;
    for (int i = 0; i < size; i++) {
        if (synchResult[i] != gpu_result[i]) {
            isCorrect = false;
            printf("%i CPU: %i GPU:%i\n", i, synchResult[i], gpu_result[i]);
        }
    }
    if (!isCorrect) {
        printf("The results do not match\n");
    } else {
        printf("The results match\n");
    }
}
// Times and runs the GPU BFS variant whose kernel (exploreWave) reads per-node
// metadata from Node objects cached in device memory, then verifies the device
// cost array against the CPU result in synchResult.
// Mask values: 0 = unvisited, 1 = on current frontier, 2 = finished.
// NOTE(review): nextWaveMask[0] is never initialized before the copy to the
// device (the init loop starts at i = 1) -- it presumably should be 0.
// NOTE(review): host arrays, device buffers, events, and gpu_result leak.
void callDeviceCachedVisitBFS(Node *d_graph, int *d_size, int *d_children, int size, int *d_maxChildren, int *synchResult) {
    cudaEvent_t start;
    cudaEventCreate(&start);
    cudaEvent_t stop;
    cudaEventCreate(&stop);
    int *d_cost, *d_waveMask, *d_nextWaveMask;
    // Allocate space for device copies
    cudaMalloc((void **)&d_cost, size * sizeof(int));
    cudaMalloc((void **)&d_waveMask, size * sizeof(int));
    cudaMalloc((void **)&d_nextWaveMask, size * sizeof(int));
    // One thread per node (ceil division).
    int gridSz = ceil(((float) size) / TBS);
    int *waveMask = new int[size];
    int *nextWaveMask = new int[size];
    int *cost = new int[size];
    // Node 0 is the source: cost 0 and on the initial frontier.
    cost[0] = 0;
    for (int i = 1; i < size; i++) {
        cost[i] = -1;
        waveMask[i] = 0;
        nextWaveMask[i] = 0;
    }
    waveMask[0] = 1;
    cudaMemcpy(d_cost, cost, size * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_waveMask, waveMask, size * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_nextWaveMask, nextWaveMask, size * sizeof(int), cudaMemcpyHostToDevice);
    // Record the start event
    cudaEventRecord(start, NULL);
    bool complete = false;
    while(!complete) {
        // Expand the frontier, then retire it (mask 1 -> 2) into the next mask.
        exploreWave<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_graph, d_children, d_cost, d_size, d_maxChildren);
        cudaDeviceSynchronize();
        setPreviousExplored<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_size);
        cudaDeviceSynchronize();
        // Promote next mask to current and reset the device-side next mask.
        cudaMemcpy(d_waveMask, d_nextWaveMask, size * sizeof(int), cudaMemcpyDeviceToDevice);
        cudaMemcpy(d_nextWaveMask, nextWaveMask, size * sizeof(int), cudaMemcpyHostToDevice);
        // Done once no node remains on the frontier.
        complete = true;
        cudaMemcpy(waveMask, d_waveMask, size * sizeof(int), cudaMemcpyDeviceToHost);
        for(int i = 0 ; i < size; i++){
            if(waveMask[i] == 1){
                complete = false;
            }
        }
    }
    // Make sure result is finished
    cudaDeviceSynchronize();
    // Record end event
    cudaEventRecord(stop, NULL);
    cudaEventSynchronize(stop);
    float msecTotal = 0.0f;
    cudaEventElapsedTime(&msecTotal, start, stop);
    printf("GPU Wave Time= %.3f msec\n", msecTotal);
    // Copy result back to host and compare with the CPU reference.
    int *gpu_result = (int *) malloc(size * sizeof(int));
    cudaMemcpy(gpu_result, d_cost, size * sizeof(int), cudaMemcpyDeviceToHost);
    bool isCorrect = true;
    for (int i = 0; i < size; i++) {
        if (synchResult[i] != gpu_result[i]) {
            isCorrect = false;
            printf("%i CPU: %i GPU:%i\n", i, synchResult[i], gpu_result[i]);
        }
    }
    if (!isCorrect) {
        printf("The results do not match\n");
    } else {
        printf("The results match\n");
    }
}
// Entry point: build a random graph, run the reference CPU BFS, then run and
// time the GPU BFS variants, checking each against the CPU result.
// Usage: prog <numNodes> <maxEdgesPerNode>
int main (int argc, char **argv) {
    if (argc != 3) {
        printf("\nToo few arguments!\n");
        abort();
    }
    // Get command line arguments
    int size = atoi(argv[1]);
    int maxEdgesPerNode = atoi(argv[2]);
    // Random graph plus flattened (GPU-friendly) views of it.
    Node* nodes = generateGraph(size);
    int* children = generateChildren(nodes, size, maxEdgesPerNode);
    int* numChildren = transformNumChildren(nodes, size);
    int* parentPtr = transformParentPtr(nodes, size);   // CSR offsets into parent[]
    int numEdges = parentPtr[size];
    int* parent = transformParents(nodes, size, parentPtr);
    Node* d_graph;
    int *d_children, *d_size, *d_maxChildren, *d_numChildren, *d_parent, *d_parentPtr;
    // Allocate space for device copies
    cudaMalloc((void **)&d_graph, size * sizeof(Node));
    cudaMalloc((void **)&d_size, sizeof(int));
    cudaMalloc((void **)&d_maxChildren, sizeof(int));
    cudaMalloc((void **)&d_children, size * maxEdgesPerNode * sizeof(int));
    cudaMalloc((void **)&d_numChildren, size * sizeof(int));
    cudaMalloc((void **)&d_parentPtr, (size + 1) * sizeof(int));
    cudaMalloc((void **)&d_parent, numEdges * sizeof(int));
    // Copy inputs to device (done before bfs() mutates the hosts' explored flags).
    cudaMemcpy(d_graph, nodes, size * sizeof(Node), cudaMemcpyHostToDevice);
    cudaMemcpy(d_size, &size, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_maxChildren, &maxEdgesPerNode, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_children, children, size * maxEdgesPerNode * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_numChildren, numChildren, size * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_parentPtr, parentPtr, (size + 1) * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_parent, parent, numEdges * sizeof(int), cudaMemcpyHostToDevice);
    // Reference BFS on the CPU, timed with clock().
    clock_t start;
    clock_t end;
    start = clock();
    int *synchResult = bfs(nodes, size);
    end = clock();
    printf("CPU Time= %.3f msec\n", (end - start) / (double) (CLOCKS_PER_SEC / 1000));
    // GPU variants; each prints its own timing and result check.
    callDeviceCachedVisitBFS(d_graph, d_size, d_children, size, d_maxChildren, synchResult);
    callChildListExploreWave(d_size, d_children, d_numChildren, size, d_maxChildren, synchResult);
    callFlipFlopParent(d_size, d_children, d_numChildren, d_maxChildren, d_parent, d_parentPtr, size, maxEdgesPerNode, synchResult);
    // Cleanup -- BUGFIX: d_parent and d_parentPtr were previously never freed.
    cudaFree(d_graph);
    cudaFree(d_size);
    cudaFree(d_children);
    cudaFree(d_numChildren);
    cudaFree(d_maxChildren);
    cudaFree(d_parent);
    cudaFree(d_parentPtr);
    return 0;
}
// Construct a node with the given id; starts unexplored with no children.
Node::Node(int newValue) {
    value = newValue;
    explored = 0;
    // BUGFIX: numChildren/children were left uninitialized, so the duplicate
    // check in generateChildren() read garbage via getNumChildren() before the
    // first addChild() call.
    numChildren = 0;
    children = NULL;
}
// Default constructor required by `new Node[n]`; leaves every member
// uninitialized -- callers overwrite each element immediately (see generateGraph).
Node::Node() {
}
// Node id accessor (usable on host and device).
__host__ __device__ int Node::getValue() {
    return value;
}
// Raw pointer to the child-id array (valid entries: getNumChildren()).
__host__ __device__ int* Node::getChildren() {
    return children;
}
// Number of children actually added via addChild().
__host__ __device__ int Node::getNumChildren() {
    return numChildren;
}
// Append the child's id to this node's child list.
// NOTE(review): no capacity check against the size passed to
// initializeChildren() -- callers must not add more than that many.
void Node::addChild(Node* child) {
    children[numChildren] = child->getValue();
    numChildren++;
    return;
}
// Dump this node to stdout as "Value: v Children: [a, b, c]".
void Node::printNode() {
    printf("Value: %i Children: [", value);
    const char *separator = "";
    for (int childIdx = 0; childIdx < numChildren; childIdx++) {
        printf("%s", separator);
        printf("%i", children[childIdx]);
        separator = ", ";
    }
    printf("]\n");
    return;
}
// Allocate storage for up to numEdges child ids.
// NOTE(review): does not reset numChildren and would leak a previous array if
// called twice -- confirm single-call usage per node.
void Node::initializeChildren(int numEdges) {
    children = new int[numEdges];
}
// Current exploration state (0 unvisited, 1 frontier, 2 finished -- see bfs/exploreChild).
__host__ __device__ int Node::getExplored() {
    return explored;
}
// Device-side atomic state update; returns the previous state.
__device__ int Node::parallelSetExplored(int newExplored) {
    return atomicExch(&explored, newExplored);
}
void Node::setExplored(int newExplored) {
explored = newExplored;
return;
} | #include <stdio.h>
#include <assert.h>
#include <math.h>
#include <vector>
#include <queue>
#include <ctime>
// CUDA runtime
#include <hip/hip_runtime.h>
using namespace std;
// Thread block size
#define TBS 512
// Warp size
#define WS 32
// Graph node: an id, a dynamically sized child-id list, and an exploration
// state used by the BFS code (0 = unvisited, 1 = frontier, 2 = finished).
class Node {
private:
    int value;       // node id (also its index in the node array)
    int* children;   // heap array of child ids, sized by initializeChildren()
    int numChildren; // number of valid entries in children
    int explored;    // BFS state: 0 unvisited, 1 frontier, 2 finished
public:
    Node();
    Node(int);
    __host__ __device__ int getValue();
    void addChild(Node*);
    __host__ __device__ int* getChildren();
    __host__ __device__ int getNumChildren();
    void printNode();
    void initializeChildren(int);
    __host__ __device__ int getExplored();
    void setExplored(int);
    __device__ int parallelSetExplored(int);
};
// Backward BFS step over a CSR parent list: each still-unvisited node (mask 0)
// scans its parents d_parent[d_parentPtr[idx]..d_parentPtr[idx+1]) and, if any
// parent is on the current frontier (mask 1), joins the next frontier with that
// parent's cost + 1. Finished nodes (mask 2) are carried forward.
// Launch: 1-D grid with >= *d_size threads, TBS threads per block.
__global__ void parentListBackwardsWave(int *d_waveMask, int *d_nextWaveMask, int *d_parent, int *d_parentPtr, int *d_cost, int *d_size) {
    int idx = blockIdx.x * TBS + threadIdx.x;  // one thread per node id
    if (idx < *d_size && d_waveMask[idx] == 0) {
        // Loop through all parents of this node.
        for (int i = d_parentPtr[idx]; i < d_parentPtr[idx + 1]; i++) {
            if (d_waveMask[d_parent[i]] == 1) {
                // Flag ourselves in the next frontier; only this thread writes idx's cost.
                atomicCAS(&d_nextWaveMask[idx], 0, 1);
                d_cost[idx] = d_cost[d_parent[i]] + 1;
                break;
            }
        }
    }
    // Propagate the "finished" state into the next mask.
    if(idx < *d_size && d_waveMask[idx] == 2){
        d_nextWaveMask[idx] = 2;
    }
}
// Backward BFS step without a parent list: each unvisited node (mask 0)
// linearly scans the whole dense child table (*d_size * *d_maxChildren slots)
// looking for an edge pointing at it from a frontier node, then adopts that
// parent's cost + 1. O(V * maxChildren) work per unvisited node.
// NOTE(review): d_numChildren is unused here; also generateChildren() leaves
// unused table slots uninitialized, so a garbage slot equal to idx could match
// -- verify against the producer.
__global__ void backwardsWave(int *d_waveMask, int *d_nextWaveMask, int *d_children, int *d_numChildren, int *d_cost, int *d_size, int *d_maxChildren) {
    int idx = blockIdx.x * TBS + threadIdx.x;  // one thread per node id
    if (idx < *d_size && d_waveMask[idx] == 0) {
        // Scan every slot of the flattened child table.
        for (int i = 0; i < *d_size * *d_maxChildren; i++) {
            if (d_children[i] == idx) {
                int parent = i / *d_maxChildren;  // owning row = parent node id
                if (d_waveMask[parent] == 1) {
                    atomicCAS(&d_nextWaveMask[idx], 0, 1);
                    d_cost[idx] = d_cost[parent] + 1;
                    break;
                }
            }
        }
    }
    // Propagate the "finished" state into the next mask.
    if(idx < *d_size && d_waveMask[idx] == 2){
        d_nextWaveMask[idx] = 2;
    }
}
// Forward BFS step: each frontier node (mask 1) scatters to its children,
// flagging them in the next mask via atomicCAS and writing cost + 1 for
// children not yet visited.
// NOTE(review): the d_cost[child] write is not atomic; concurrent writers are
// all frontier nodes of the same depth and write the same value -- presumably
// benign for BFS levels, verify.
__global__ void childListExploreWave(int *d_waveMask, int *d_nextWaveMask, int *d_children, int *d_numChildren, int *d_cost, int *d_size, int *d_maxChildren) {
    int idx = blockIdx.x * TBS + threadIdx.x;  // one thread per node id
    if (idx < *d_size && d_waveMask[idx] == 1) {
        int numChildren = d_numChildren[idx];
        for (int i = 0; i < numChildren; i++) {
            int child = d_children[idx * *d_maxChildren + i];
            // Claim the child's next-frontier slot (0 -> 1) exactly once.
            atomicCAS(&d_nextWaveMask[child],0,1);
            if (d_waveMask[child] == 0) {
                d_cost[child] = d_cost[idx] + 1;
            }
        }
    }
    // Propagate the "finished" state into the next mask.
    if(idx < *d_size && d_waveMask[idx] == 2){
        d_nextWaveMask[idx] = 2;
    }
}
// Forward BFS step reading per-node metadata from the Node objects cached in
// device memory: each frontier node (mask 1) copies its Node record, then
// scatters to its children via the flattened child table.
// NOTE(review): same unsynchronized d_cost[child] write as childListExploreWave
// -- equal-depth writers store the same value, presumably benign; verify.
__global__ void exploreWave(int *d_waveMask, int *d_nextWaveMask, Node *d_graph, int *d_children, int *d_cost, int *d_size, int *d_maxChildren) {
    int idx = blockIdx.x * TBS + threadIdx.x;  // one thread per node id
    if (idx < *d_size && d_waveMask[idx] == 1) {
        Node currentNode = d_graph[idx];  // by-value copy from global memory
        int numChildren = currentNode.getNumChildren();
        for (int i = 0; i < numChildren; i++) {
            int child = d_children[idx * *d_maxChildren + i];
            // Claim the child's next-frontier slot (0 -> 1) exactly once.
            atomicCAS(&d_nextWaveMask[child],0,1);
            if (d_waveMask[child] == 0) {
                d_cost[child] = d_cost[idx] + 1;
            }
        }
    }
    // Propagate the "finished" state into the next mask.
    if(idx < *d_size && d_waveMask[idx] == 2){
        d_nextWaveMask[idx] = 2;
    }
}
// Retire the current frontier: every node with mask value 1 in d_waveMask is
// marked finished (2) in d_nextWaveMask. One thread per node; extra threads exit.
__global__ void setPreviousExplored(int *d_waveMask, int *d_nextWaveMask, int *d_size){
    const int node = blockIdx.x * TBS + threadIdx.x;
    if (node >= *d_size) {
        return;
    }
    if (d_waveMask[node] == 1) {
        d_nextWaveMask[node] = 2;
    }
}
// Populate each node with up to maxEdgesPerNode random children (duplicates
// and self-loops are skipped, so a node may end up with fewer). Also fills the
// flattened table children[node * maxEdgesPerNode + k] consumed by the kernels.
// Returns the heap-allocated table; caller owns it.
// NOTE(review): table slots for skipped edges stay uninitialized; consumers
// must bound reads by the per-node child count (backwardsWave does not).
int* generateChildren(Node *nodes, int nNodes, int maxEdgesPerNode) {
    int* children = new int[nNodes * maxEdgesPerNode];
    for (int i = 0; i < nNodes; i++) {
        int numEdges = (rand() % maxEdgesPerNode) + 1;  // edges to attempt
        nodes[i].initializeChildren(numEdges);
        for (int j = 0; j < numEdges; j++) {
            int child = rand() % nNodes;  // candidate child id
            // Reject a child that was already added to this node.
            bool isChild = false;
            for (int k = 0; k < nodes[i].getNumChildren(); k++){
                if (child == nodes[i].getChildren()[k]){
                    isChild = true;
                    break;
                }
            }
            // Also reject self-loops.
            if (!isChild && child != nodes[i].getValue()){
                children[i * maxEdgesPerNode + nodes[i].getNumChildren()] = child;
                nodes[i].addChild(&nodes[child]);
            }
        }
    }
    return children;
}
// Build an array of nNodes nodes with ids 0..nNodes-1.
// Seeds rand() with the current time, so each run produces a fresh graph.
// Returns a heap array owned by the caller.
Node* generateGraph(int nNodes) {
    srand((unsigned)time(0));
    Node* nodes = new Node[nNodes];
    for (int i = 0; i < nNodes; i++) {
        // BUGFIX: assign directly instead of leaking a heap-allocated temporary
        // (the original did `new Node(i)` and never deleted it).
        nodes[i] = Node(i);
    }
    return nodes;
}
// Recursive helper that records each newly reached node into path[depth]
// buckets; pushes a whole level before recursing so levels stay in BFS order.
// NOTE(review): toExplore is never delete[]d (leaks on every call), and
// path->size() (size_t) is compared against int depth -- fine for the
// non-negative depths callers pass, but worth confirming.
void exploreChild(Node* child, vector< vector<Node*> >* path, int depth, Node* nodes) {
    int numChildren = child->getNumChildren();
    if (numChildren > 0) {
        bool *toExplore = new bool[numChildren];  // which children to recurse into
        vector<Node*> newPath;
        // Ensure a bucket exists for this depth.
        if (path->size() <= depth) {
            path->push_back(newPath);
        }
        vector<Node*>* currentPath = &(path->at(depth));
        for (int i = 0; i < numChildren; i++) {
            Node* newChild = &nodes[child->getChildren()[i]];
            if (newChild->getExplored() == 0) {
                currentPath->push_back(newChild);
                newChild->setExplored(1);
                toExplore[i] = true;
            } else {
                toExplore[i] = false;
            }
        }
        // Explore loop after push loop so it is actually BFS
        for (int i = 0; i < numChildren; i++) {
            Node* newChild = &nodes[child->getChildren()[i]];
            if (toExplore[i]) {
                exploreChild(newChild, path, depth + 1, nodes);
            }
        }
    }
    child->setExplored(2);  // mark this node fully processed
    return;
}
// Reference CPU BFS from node 0. Returns a heap array where cost[i] is the
// BFS depth of node i, or -1 if unreachable. Mutates each visited node's
// explored flag (set to 1).
int* bfs(Node* nodes, int size) {
    int* cost = new int[size];
    for (int i = 0; i < size; i++) {
        cost[i] = -1;
    }
    Node* currentNode = &nodes[0];
    queue<Node*> wave;
    wave.push(currentNode);
    cost[0] = 0;
    int depth = 0;
    while (!wave.empty()) {
        // Drain one full depth level at a time.
        depth = cost[wave.front()->getValue()];
        while (!wave.empty() && depth == cost[wave.front()->getValue()]) {
            currentNode = wave.front();
            wave.pop();
            currentNode->setExplored(1);
            if (currentNode->getNumChildren() > 0) {
                int *children = currentNode->getChildren();
                for (int i = 0; i < currentNode->getNumChildren(); i++) {
                    // Enqueue unvisited children at depth + 1.
                    if (nodes[children[i]].getExplored() == 0) {
                        nodes[children[i]].setExplored(1);
                        cost[children[i]] = depth + 1;
                        wave.push(&nodes[children[i]]);
                    }
                }
            }
        }
    }
    return cost;
}
// Flatten a level-order path (path[depth] = nodes at that depth) into a
// per-node depth array; nodes that appear in no level get -1.
// NOTE(review): int i is compared with path.size() (size_t) -- harmless for
// realistic sizes.
int* transformBfs(vector< vector<Node*> > path, int size) {
    int *result = new int[size];
    for (int i = 0; i < size; i++) {
        result[i] = -1;
    }
    for (int i = 0; i < path.size(); i++) {
        for (int j = 0; j < path[i].size(); j++) {
            result[path[i][j]->getValue()] = i;  // depth of this node is i
        }
    }
    return result;
}
// Flatten each node's child count into a plain int array (index == node id).
// Returns a heap array owned by the caller.
int* transformNumChildren(Node* nodes, int size) {
    int *counts = new int[size];
    for (int nodeId = 0; nodeId < size; nodeId++) {
        counts[nodeId] = nodes[nodeId].getNumChildren();
    }
    return counts;
}
// Build CSR-style offsets for the reverse (parent) adjacency: after the prefix
// sum, result[i]..result[i+1] delimits node i's parents in the array produced
// by transformParents, and result[size] is the total edge count.
// Returns a heap array of size + 1 ints owned by the caller.
int* transformParentPtr(Node* nodes, int size) {
    int *result = new int[size + 1];
    // BUGFIX: zero all size + 1 slots. The original loop stopped at size - 1,
    // leaving result[size] uninitialized; it was then incremented and read in
    // the prefix sum, making the total edge count garbage.
    for (int i = 0; i < size + 1; i++) {
        result[i] = 0;
    }
    // Count each node's in-degree into result[child + 1].
    for (int i = 0; i < size; i++) {
        Node *node = &nodes[i];
        if (node->getNumChildren() > 0) {
            int *children = node->getChildren();
            for (int j = 0; j < node->getNumChildren(); j++) {
                int child = children[j];
                result[child + 1] += 1;
            }
        }
    }
    // Exclusive prefix sum turns counts into offsets.
    for (int i = 1; i < size + 1; i++) {
        result[i] = result[i] + result[i - 1];
    }
    return result;
}
// Build the reverse edge list matching parentPtr: for each node c,
// result[parentPtr[c]..parentPtr[c+1]) holds the ids of c's parents.
// Returns a heap array of parentPtr[size] ints owned by the caller.
int* transformParents(Node* nodes, int size, int* parentPtr) {
    int numEdges = parentPtr[size];
    int *result = new int[numEdges];
    int *curIdx = new int[size];  // next free slot per child node
    for (int i = 0; i < size; i++) {
        curIdx[i] = parentPtr[i];
    }
    for (int i = 0; i < size; i++) {
        Node *node = &nodes[i];
        if (node->getNumChildren() > 0) {
            int *children = node->getChildren();
            for (int j = 0; j < node->getNumChildren(); j++) {
                int child = children[j];
                result[curIdx[child]] = i;  // i is a parent of child
                curIdx[child] = curIdx[child] + 1;
            }
        }
    }
    // BUGFIX: the scratch array was previously leaked.
    delete[] curIdx;
    return result;
}
// GPU BFS driver that "flip-flops" between the forward child-scatter kernel
// (childListExploreWave) while the graph is mostly unexplored and the backward
// parent-gather kernel (parentListBackwardsWave) once most nodes are finished.
// Prints the elapsed GPU time and compares the device cost array against the
// CPU reference in synchResult.
// Mask values: 0 = unvisited, 1 = on current frontier, 2 = finished.
void callFlipFlopParent(int *d_size, int *d_children, int *d_numChildren, int *d_maxChildren, int *d_parent, int *d_parentPtr, int size, int maxChildren, int *synchResult) {
    hipEvent_t start;
    hipEventCreate(&start);
    hipEvent_t stop;
    hipEventCreate(&stop);
    int *d_cost, *d_waveMask, *d_nextWaveMask;
    // Device scratch: per-node cost, current frontier mask, next frontier mask.
    hipMalloc((void **)&d_cost, size * sizeof(int));
    hipMalloc((void **)&d_waveMask, size * sizeof(int));
    hipMalloc((void **)&d_nextWaveMask, size * sizeof(int));
    // One thread per node (ceil division).
    int gridSz = ceil(((float) size) / TBS);
    int *waveMask = new int[size];
    int *nextWaveMask = new int[size];
    int *cost = new int[size];
    // Node 0 is the BFS source: cost 0 and on the initial frontier.
    cost[0] = 0;
    waveMask[0] = 1;
    // BUGFIX: nextWaveMask[0] was left uninitialized (the loop below starts at
    // i = 1) and then copied to the device, breaking the atomicCAS(0 -> 1)
    // claim for node 0.
    nextWaveMask[0] = 0;
    for (int i = 1; i < size; i++) {
        cost[i] = -1;
        waveMask[i] = 0;
        nextWaveMask[i] = 0;
    }
    hipMemcpy(d_cost, cost, size * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_waveMask, waveMask, size * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_nextWaveMask, nextWaveMask, size * sizeof(int), hipMemcpyHostToDevice);
    // Switch to the backward kernel once (m^2 - 1)/m^2 of the nodes are done.
    // BUGFIX: computed in floating point; the original all-int expression
    // truncated the fraction to 0, so the forward kernel never ran.
    int switchThreshold = (int)(((double)(maxChildren * maxChildren - 1) / (maxChildren * maxChildren)) * size);
    // Record the start event
    hipEventRecord(start, NULL);
    bool complete = false;
    int completed = 0;
    while(!complete) {
        // Launch the kernel appropriate for this phase.
        if (completed < switchThreshold) {
            childListExploreWave<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_children, d_numChildren, d_cost, d_size, d_maxChildren);
        } else {
            parentListBackwardsWave<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_parent, d_parentPtr, d_cost, d_size);
        }
        hipDeviceSynchronize();
        // Retire the current frontier (mask 1 -> 2) into the next mask.
        setPreviousExplored<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_size);
        hipDeviceSynchronize();
        // Promote next mask to current; reset the device-side next mask.
        hipMemcpy(d_waveMask, d_nextWaveMask, size * sizeof(int), hipMemcpyDeviceToDevice);
        hipMemcpy(d_nextWaveMask, nextWaveMask, size * sizeof(int), hipMemcpyHostToDevice);
        // Terminate once no node is on the frontier; recount finished nodes.
        // BUGFIX: `completed` is recounted from scratch each pass -- the
        // original accumulated across iterations and overcounted finished nodes.
        complete = true;
        completed = 0;
        hipMemcpy(waveMask, d_waveMask, size * sizeof(int), hipMemcpyDeviceToHost);
        for(int i = 0 ; i < size; i++) {
            if(waveMask[i] == 1) {
                complete = false;
            } else if (waveMask[i] == 2) {
                completed += 1;
            }
        }
    }
    // Make sure result is finished
    hipDeviceSynchronize();
    // Record end event
    hipEventRecord(stop, NULL);
    hipEventSynchronize(stop);
    float msecTotal = 0.0f;
    hipEventElapsedTime(&msecTotal, start, stop);
    printf("GPU Parent Flip Flop Explore Time= %.3f msec\n", msecTotal);
    // Copy result back to host and compare with the CPU reference.
    int *gpu_result = (int *) malloc(size * sizeof(int));
    hipMemcpy(gpu_result, d_cost, size * sizeof(int), hipMemcpyDeviceToHost);
    bool isCorrect = true;
    for (int i = 0; i < size; i++) {
        if (synchResult[i] != gpu_result[i]) {
            isCorrect = false;
            printf("%i CPU: %i GPU:%i\n", i, synchResult[i], gpu_result[i]);
        }
    }
    if (!isCorrect) {
        printf("The results do not match\n");
    } else {
        printf("The results match\n");
    }
    // BUGFIX: release all temporaries (everything below was previously leaked).
    free(gpu_result);
    delete[] waveMask;
    delete[] nextWaveMask;
    delete[] cost;
    hipFree(d_cost);
    hipFree(d_waveMask);
    hipFree(d_nextWaveMask);
    hipEventDestroy(start);
    hipEventDestroy(stop);
}
// HIP port of the "flip flop" GPU BFS: forward expansion while few nodes are
// finished, dense backward gather (backwardsWave) afterwards. Verifies the
// device costs against the CPU result in synchResult.
// NOTE(review): nextWaveMask[0] is never initialized (init loop starts at
// i = 1) before being copied to the device -- it presumably should be 0.
// NOTE(review): the switch threshold uses integer division, so
// (m*m - 1) / (m*m) truncates to 0 and the forward branch is never taken.
// NOTE(review): `completed` is never reset per iteration; host arrays, device
// buffers, events, and gpu_result are never released.
void callFlipFlopWaveExplore(int *d_size, int *d_children, int *d_numChildren, int size, int *d_maxChildren, int maxChildren, int *synchResult) {
    hipEvent_t start;
    hipEventCreate(&start);
    hipEvent_t stop;
    hipEventCreate(&stop);
    int *d_cost, *d_waveMask, *d_nextWaveMask;
    // Allocate space for device copies
    hipMalloc((void **)&d_cost, size * sizeof(int));
    hipMalloc((void **)&d_waveMask, size * sizeof(int));
    hipMalloc((void **)&d_nextWaveMask, size * sizeof(int));
    // One thread per node (ceil division).
    int gridSz = ceil(((float) size) / TBS);
    int *waveMask = new int[size];
    int *nextWaveMask = new int[size];
    int *cost = new int[size];
    // Node 0 is the source: cost 0 and on the initial frontier.
    cost[0] = 0;
    for (int i = 1; i < size; i++) {
        cost[i] = -1;
        waveMask[i] = 0;
        nextWaveMask[i] = 0;
    }
    waveMask[0] = 1;
    hipMemcpy(d_cost, cost, size * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_waveMask, waveMask, size * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_nextWaveMask, nextWaveMask, size * sizeof(int), hipMemcpyHostToDevice);
    // Record the start event
    hipEventRecord(start, NULL);
    bool complete = false;
    int completed = 0;
    while(!complete) {
        // Launch kernel on GPU: forward scatter early, backward gather late.
        if (completed < (maxChildren * maxChildren - 1) / (maxChildren * maxChildren) * size) {
            childListExploreWave<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_children, d_numChildren, d_cost, d_size, d_maxChildren);
        } else {
            backwardsWave<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_children, d_numChildren, d_cost, d_size, d_maxChildren);
        }
        hipDeviceSynchronize();
        // Retire the current frontier (mask 1 -> 2) into the next mask.
        setPreviousExplored<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_size);
        hipDeviceSynchronize();
        // Promote next mask to current and reset the device-side next mask.
        hipMemcpy(d_waveMask, d_nextWaveMask, size * sizeof(int), hipMemcpyDeviceToDevice);
        hipMemcpy(d_nextWaveMask, nextWaveMask, size * sizeof(int), hipMemcpyHostToDevice);
        complete = true;
        hipMemcpy(waveMask, d_waveMask, size * sizeof(int), hipMemcpyDeviceToHost);
        // Loop while any node remains on the frontier; count finished nodes.
        for(int i = 0 ; i < size; i++) {
            if(waveMask[i] == 1) {
                complete = false;
            } else if (waveMask[i] == 2) {
                completed += 1;
            }
        }
    }
    // Make sure result is finished
    hipDeviceSynchronize();
    // Record end event
    hipEventRecord(stop, NULL);
    hipEventSynchronize(stop);
    float msecTotal = 0.0f;
    hipEventElapsedTime(&msecTotal, start, stop);
    printf("GPU Flip Flop Explore Time= %.3f msec\n", msecTotal);
    // Copy result back to host and compare with the CPU reference.
    int *gpu_result = (int *) malloc(size * sizeof(int));
    hipMemcpy(gpu_result, d_cost, size * sizeof(int), hipMemcpyDeviceToHost);
    bool isCorrect = true;
    for (int i = 0; i < size; i++) {
        if (synchResult[i] != gpu_result[i]) {
            isCorrect = false;
            printf("%i CPU: %i GPU:%i\n", i, synchResult[i], gpu_result[i]);
        }
    }
    if (!isCorrect) {
        printf("The results do not match\n");
    } else {
        printf("The results match\n");
    }
}
// HIP port of the forward-only GPU BFS over the flattened child list; verifies
// the device cost array against the CPU result in synchResult.
// NOTE(review): nextWaveMask[0] is never initialized before the copy to the
// device (init loop starts at i = 1) -- it presumably should be 0.
// NOTE(review): host arrays, device buffers, events, and gpu_result leak.
void callChildListExploreWave(int *d_size, int *d_children, int *d_numChildren, int size, int *d_maxChildren, int *synchResult) {
    hipEvent_t start;
    hipEventCreate(&start);
    hipEvent_t stop;
    hipEventCreate(&stop);
    int *d_cost, *d_waveMask, *d_nextWaveMask;
    // Allocate space for device copies
    hipMalloc((void **)&d_cost, size * sizeof(int));
    hipMalloc((void **)&d_waveMask, size * sizeof(int));
    hipMalloc((void **)&d_nextWaveMask, size * sizeof(int));
    // One thread per node (ceil division).
    int gridSz = ceil(((float) size) / TBS);
    int *waveMask = new int[size];
    int *nextWaveMask = new int[size];
    int *cost = new int[size];
    // Node 0 is the source: cost 0 and on the initial frontier.
    cost[0] = 0;
    for (int i = 1; i < size; i++) {
        cost[i] = -1;
        waveMask[i] = 0;
        nextWaveMask[i] = 0;
    }
    waveMask[0] = 1;
    hipMemcpy(d_cost, cost, size * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_waveMask, waveMask, size * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_nextWaveMask, nextWaveMask, size * sizeof(int), hipMemcpyHostToDevice);
    // Record the start event
    hipEventRecord(start, NULL);
    bool complete = false;
    while(!complete) {
        // Expand the frontier, then retire it (mask 1 -> 2) into the next mask.
        childListExploreWave<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_children, d_numChildren, d_cost, d_size, d_maxChildren);
        hipDeviceSynchronize();
        setPreviousExplored<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_size);
        hipDeviceSynchronize();
        // Promote next mask to current and reset the device-side next mask.
        hipMemcpy(d_waveMask, d_nextWaveMask, size * sizeof(int), hipMemcpyDeviceToDevice);
        hipMemcpy(d_nextWaveMask, nextWaveMask, size * sizeof(int), hipMemcpyHostToDevice);
        // Done once no node remains on the frontier.
        complete = true;
        hipMemcpy(waveMask, d_waveMask, size * sizeof(int), hipMemcpyDeviceToHost);
        for(int i = 0 ; i < size; i++){
            if(waveMask[i] == 1){
                complete = false;
            }
        }
    }
    // Make sure result is finished
    hipDeviceSynchronize();
    // Record end event
    hipEventRecord(stop, NULL);
    hipEventSynchronize(stop);
    float msecTotal = 0.0f;
    hipEventElapsedTime(&msecTotal, start, stop);
    printf("GPU Child List Explore Time= %.3f msec\n", msecTotal);
    // Copy result back to host and compare with the CPU reference.
    int *gpu_result = (int *) malloc(size * sizeof(int));
    hipMemcpy(gpu_result, d_cost, size * sizeof(int), hipMemcpyDeviceToHost);
    bool isCorrect = true;
    for (int i = 0; i < size; i++) {
        if (synchResult[i] != gpu_result[i]) {
            isCorrect = false;
            printf("%i CPU: %i GPU:%i\n", i, synchResult[i], gpu_result[i]);
        }
    }
    if (!isCorrect) {
        printf("The results do not match\n");
    } else {
        printf("The results match\n");
    }
}
// HIP port of the GPU BFS whose kernel (exploreWave) reads per-node metadata
// from Node objects cached in device memory; verifies the device cost array
// against the CPU result in synchResult.
// NOTE(review): nextWaveMask[0] is never initialized before the copy to the
// device (init loop starts at i = 1) -- it presumably should be 0.
// NOTE(review): host arrays, device buffers, events, and gpu_result leak.
void callDeviceCachedVisitBFS(Node *d_graph, int *d_size, int *d_children, int size, int *d_maxChildren, int *synchResult) {
    hipEvent_t start;
    hipEventCreate(&start);
    hipEvent_t stop;
    hipEventCreate(&stop);
    int *d_cost, *d_waveMask, *d_nextWaveMask;
    // Allocate space for device copies
    hipMalloc((void **)&d_cost, size * sizeof(int));
    hipMalloc((void **)&d_waveMask, size * sizeof(int));
    hipMalloc((void **)&d_nextWaveMask, size * sizeof(int));
    // One thread per node (ceil division).
    int gridSz = ceil(((float) size) / TBS);
    int *waveMask = new int[size];
    int *nextWaveMask = new int[size];
    int *cost = new int[size];
    // Node 0 is the source: cost 0 and on the initial frontier.
    cost[0] = 0;
    for (int i = 1; i < size; i++) {
        cost[i] = -1;
        waveMask[i] = 0;
        nextWaveMask[i] = 0;
    }
    waveMask[0] = 1;
    hipMemcpy(d_cost, cost, size * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_waveMask, waveMask, size * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_nextWaveMask, nextWaveMask, size * sizeof(int), hipMemcpyHostToDevice);
    // Record the start event
    hipEventRecord(start, NULL);
    bool complete = false;
    while(!complete) {
        // Expand the frontier, then retire it (mask 1 -> 2) into the next mask.
        exploreWave<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_graph, d_children, d_cost, d_size, d_maxChildren);
        hipDeviceSynchronize();
        setPreviousExplored<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_size);
        hipDeviceSynchronize();
        // Promote next mask to current and reset the device-side next mask.
        hipMemcpy(d_waveMask, d_nextWaveMask, size * sizeof(int), hipMemcpyDeviceToDevice);
        hipMemcpy(d_nextWaveMask, nextWaveMask, size * sizeof(int), hipMemcpyHostToDevice);
        // Done once no node remains on the frontier.
        complete = true;
        hipMemcpy(waveMask, d_waveMask, size * sizeof(int), hipMemcpyDeviceToHost);
        for(int i = 0 ; i < size; i++){
            if(waveMask[i] == 1){
                complete = false;
            }
        }
    }
    // Make sure result is finished
    hipDeviceSynchronize();
    // Record end event
    hipEventRecord(stop, NULL);
    hipEventSynchronize(stop);
    float msecTotal = 0.0f;
    hipEventElapsedTime(&msecTotal, start, stop);
    printf("GPU Wave Time= %.3f msec\n", msecTotal);
    // Copy result back to host and compare with the CPU reference.
    int *gpu_result = (int *) malloc(size * sizeof(int));
    hipMemcpy(gpu_result, d_cost, size * sizeof(int), hipMemcpyDeviceToHost);
    bool isCorrect = true;
    for (int i = 0; i < size; i++) {
        if (synchResult[i] != gpu_result[i]) {
            isCorrect = false;
            printf("%i CPU: %i GPU:%i\n", i, synchResult[i], gpu_result[i]);
        }
    }
    if (!isCorrect) {
        printf("The results do not match\n");
    } else {
        printf("The results match\n");
    }
}
// Entry point: build a random graph, run the reference CPU BFS, then run and
// time the GPU BFS variants, checking each against the CPU result.
// Usage: prog <numNodes> <maxEdgesPerNode>
// NOTE(review): d_parent and d_parentPtr are never hipFree'd, and the
// host-side arrays (nodes, children, numChildren, parentPtr, parent,
// synchResult) are never released.
int main (int argc, char **argv) {
    if (argc !=3) {
        printf("\nToo few arguments!\n");
        abort();
    }
    // Get command line argument
    int size = atoi(argv[1]);
    int maxEdgesPerNode = atoi(argv[2]);
    // Random graph plus flattened (GPU-friendly) views of it.
    Node* nodes = generateGraph(size);
    int* children = generateChildren(nodes, size, maxEdgesPerNode);
    int* numChildren = transformNumChildren(nodes, size);
    int* parentPtr = transformParentPtr(nodes, size);  // CSR offsets into parent[]
    int numEdges = parentPtr[size];
    int* parent = transformParents(nodes, size, parentPtr);
    Node* d_graph;
    int *d_children, *d_size, *d_maxChildren, *d_numChildren, *d_parent, *d_parentPtr;
    // Allocate space for device copies
    hipMalloc((void **)&d_graph, size * sizeof(Node));
    hipMalloc((void **)&d_size, sizeof(int));
    hipMalloc((void **)&d_maxChildren, sizeof(int));
    hipMalloc((void **)&d_children, size * maxEdgesPerNode * sizeof(int));
    hipMalloc((void **)&d_numChildren, size * sizeof(int));
    hipMalloc((void **)&d_parentPtr, (size + 1) * sizeof(int));
    hipMalloc((void **)&d_parent, numEdges * sizeof(int));
    // Copy inputs to device (before bfs() mutates the hosts' explored flags).
    hipMemcpy(d_graph, nodes, size * sizeof(Node), hipMemcpyHostToDevice);
    hipMemcpy(d_size, &size, sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_maxChildren, &maxEdgesPerNode, sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_children, children, size * maxEdgesPerNode * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_numChildren, numChildren, size * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_parentPtr, parentPtr, (size + 1) * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_parent, parent, numEdges * sizeof(int), hipMemcpyHostToDevice);
    // Reference BFS on the CPU, timed with clock().
    clock_t start;
    clock_t end;
    start = clock();
    int *synchResult = bfs(nodes, size);
    end = clock();
    printf("CPU Time= %.3f msec\n", (end - start) / (double) (CLOCKS_PER_SEC / 1000));
    // GPU variants; each prints its own timing and result check.
    callDeviceCachedVisitBFS(d_graph, d_size, d_children, size, d_maxChildren, synchResult);
    callChildListExploreWave(d_size, d_children, d_numChildren, size, d_maxChildren, synchResult);
    callFlipFlopParent(d_size, d_children, d_numChildren, d_maxChildren, d_parent, d_parentPtr, size, maxEdgesPerNode, synchResult);
    // Cleanup
    hipFree(d_graph);
    hipFree(d_size);
    hipFree(d_children);
    hipFree(d_numChildren);
    hipFree(d_maxChildren);
    return 0;
}
// Construct a node with the given id; starts unexplored with no children.
Node::Node(int newValue) {
    value = newValue;
    explored = 0;
    // BUGFIX: numChildren/children were left uninitialized, so the duplicate
    // check in generateChildren() read garbage via getNumChildren() before the
    // first addChild() call.
    numChildren = 0;
    children = NULL;
}
// Default constructor required by `new Node[n]`; leaves every member
// uninitialized -- callers overwrite each element immediately (see generateGraph).
Node::Node() {
}
// Node id accessor (usable on host and device).
__host__ __device__ int Node::getValue() {
    return value;
}
// Raw pointer to the child-id array (valid entries: getNumChildren()).
__host__ __device__ int* Node::getChildren() {
    return children;
}
// Number of children actually added via addChild().
__host__ __device__ int Node::getNumChildren() {
    return numChildren;
}
// Append the child's id to this node's child list.
// NOTE(review): no capacity check against the size passed to
// initializeChildren() -- callers must not add more than that many.
void Node::addChild(Node* child) {
    children[numChildren] = child->getValue();
    numChildren++;
    return;
}
// Dump this node to stdout as "Value: v Children: [a, b, c]".
void Node::printNode() {
    printf("Value: %i Children: [", value);
    const char *separator = "";
    for (int childIdx = 0; childIdx < numChildren; childIdx++) {
        printf("%s", separator);
        printf("%i", children[childIdx]);
        separator = ", ";
    }
    printf("]\n");
    return;
}
// Allocate storage for up to numEdges child ids.
// NOTE(review): does not reset numChildren and would leak a previous array if
// called twice -- confirm single-call usage per node.
void Node::initializeChildren(int numEdges) {
    children = new int[numEdges];
}
// Current exploration state (0 unvisited, 1 frontier, 2 finished -- see bfs/exploreChild).
__host__ __device__ int Node::getExplored() {
    return explored;
}
// Device-side atomic state update; returns the previous state.
__device__ int Node::parallelSetExplored(int newExplored) {
    return atomicExch(&explored, newExplored);
}
// Host-side (non-atomic) state update.
void Node::setExplored(int newExplored) {
    explored = newExplored;
    return;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <stdio.h>
#include <assert.h>
#include <math.h>
#include <vector>
#include <queue>
#include <ctime>
// CUDA runtime
#include <hip/hip_runtime.h>
using namespace std;
// Thread block size
#define TBS 512
// Warp size
#define WS 32
// Graph node: an id, a dynamically sized child-id list, and an exploration
// state used by the BFS code (0 = unvisited, 1 = frontier, 2 = finished).
class Node {
private:
    int value;       // node id (also its index in the node array)
    int* children;   // heap array of child ids, sized by initializeChildren()
    int numChildren; // number of valid entries in children
    int explored;    // BFS state: 0 unvisited, 1 frontier, 2 finished
public:
    Node();
    Node(int);
    __host__ __device__ int getValue();
    void addChild(Node*);
    __host__ __device__ int* getChildren();
    __host__ __device__ int getNumChildren();
    void printNode();
    void initializeChildren(int);
    __host__ __device__ int getExplored();
    void setExplored(int);
    __device__ int parallelSetExplored(int);
};
// Backward BFS step over a CSR parent list: each unvisited node (mask 0) scans
// its parents and, if one is on the current frontier (mask 1), joins the next
// frontier with that parent's cost + 1; finished nodes (mask 2) carry over.
__global__ void parentListBackwardsWave(int *d_waveMask, int *d_nextWaveMask, int *d_parent, int *d_parentPtr, int *d_cost, int *d_size) {
    int idx = blockIdx.x * TBS + threadIdx.x;  // one thread per node id
    if (idx < *d_size && d_waveMask[idx] == 0) {
        // Loop through all parents of this node.
        for (int i = d_parentPtr[idx]; i < d_parentPtr[idx + 1]; i++) {
            if (d_waveMask[d_parent[i]] == 1) {
                atomicCAS(&d_nextWaveMask[idx], 0, 1);
                d_cost[idx] = d_cost[d_parent[i]] + 1;
                break;
            }
        }
    }
    // Propagate the "finished" state into the next mask.
    if(idx < *d_size && d_waveMask[idx] == 2){
        d_nextWaveMask[idx] = 2;
    }
}
// Backward BFS step without a parent list: each unvisited node (mask 0)
// linearly scans the whole dense child table for an edge pointing at it from a
// frontier node, then adopts that parent's cost + 1. d_numChildren is unused.
__global__ void backwardsWave(int *d_waveMask, int *d_nextWaveMask, int *d_children, int *d_numChildren, int *d_cost, int *d_size, int *d_maxChildren) {
    int idx = blockIdx.x * TBS + threadIdx.x;  // one thread per node id
    if (idx < *d_size && d_waveMask[idx] == 0) {
        // Scan every slot of the flattened child table.
        for (int i = 0; i < *d_size * *d_maxChildren; i++) {
            if (d_children[i] == idx) {
                int parent = i / *d_maxChildren;  // owning row = parent node id
                if (d_waveMask[parent] == 1) {
                    atomicCAS(&d_nextWaveMask[idx], 0, 1);
                    d_cost[idx] = d_cost[parent] + 1;
                    break;
                }
            }
        }
    }
    // Propagate the "finished" state into the next mask.
    if(idx < *d_size && d_waveMask[idx] == 2){
        d_nextWaveMask[idx] = 2;
    }
}
// Forward BFS step: each frontier node (mask 1) scatters to its children,
// flagging them in the next mask via atomicCAS and writing cost + 1 for
// children not yet visited; finished nodes (mask 2) carry over.
__global__ void childListExploreWave(int *d_waveMask, int *d_nextWaveMask, int *d_children, int *d_numChildren, int *d_cost, int *d_size, int *d_maxChildren) {
    int idx = blockIdx.x * TBS + threadIdx.x;  // one thread per node id
    if (idx < *d_size && d_waveMask[idx] == 1) {
        int numChildren = d_numChildren[idx];
        for (int i = 0; i < numChildren; i++) {
            int child = d_children[idx * *d_maxChildren + i];
            // Claim the child's next-frontier slot (0 -> 1) exactly once.
            atomicCAS(&d_nextWaveMask[child],0,1);
            if (d_waveMask[child] == 0) {
                d_cost[child] = d_cost[idx] + 1;
            }
        }
    }
    if(idx < *d_size && d_waveMask[idx] == 2){
        d_nextWaveMask[idx] = 2;
    }
}
// Forward BFS step reading per-node metadata from Node objects cached in
// device memory; otherwise identical in behavior to childListExploreWave.
__global__ void exploreWave(int *d_waveMask, int *d_nextWaveMask, Node *d_graph, int *d_children, int *d_cost, int *d_size, int *d_maxChildren) {
    int idx = blockIdx.x * TBS + threadIdx.x;  // one thread per node id
    if (idx < *d_size && d_waveMask[idx] == 1) {
        Node currentNode = d_graph[idx];  // by-value copy from global memory
        int numChildren = currentNode.getNumChildren();
        for (int i = 0; i < numChildren; i++) {
            int child = d_children[idx * *d_maxChildren + i];
            atomicCAS(&d_nextWaveMask[child],0,1);
            if (d_waveMask[child] == 0) {
                d_cost[child] = d_cost[idx] + 1;
            }
        }
    }
    if(idx < *d_size && d_waveMask[idx] == 2){
        d_nextWaveMask[idx] = 2;
    }
}
// Marks every vertex that was on the current frontier (mask == 1) as
// finished (mask == 2) in the next-wave mask. Run between BFS steps.
__global__ void setPreviousExplored(int *d_waveMask, int *d_nextWaveMask, int *d_size){
    const int tid = blockIdx.x * TBS + threadIdx.x;
    if (tid < *d_size && d_waveMask[tid] == 1) {
        d_nextWaveMask[tid] = 2;
    }
}
// Builds a random adjacency structure: for each node, draws 1..maxEdgesPerNode
// candidate children, rejecting duplicates and self-loops.
// Returns a dense nNodes x maxEdgesPerNode table; row i carries
// nodes[i].getNumChildren() valid entries, remaining slots hold -1.
// Fix: the table is now pre-filled with -1 — previously the unused slots were
// left uninitialized, and backwardsWave() scans every slot of the table, so a
// garbage slot equal to some vertex id was treated as a real edge.
int* generateChildren(Node *nodes, int nNodes, int maxEdgesPerNode) {
    int* children = new int[nNodes * maxEdgesPerNode];
    for (int i = 0; i < nNodes * maxEdgesPerNode; i++) {
        children[i] = -1;  // sentinel: no edge in this slot
    }
    for (int i = 0; i < nNodes; i++) {
        int numEdges = (rand() % maxEdgesPerNode) + 1;
        nodes[i].initializeChildren(numEdges);
        for (int j = 0; j < numEdges; j++) {
            int child = rand() % nNodes;
            // Reject a candidate already recorded for this node.
            bool isChild = false;
            for (int k = 0; k < nodes[i].getNumChildren(); k++) {
                if (child == nodes[i].getChildren()[k]) {
                    isChild = true;
                    break;
                }
            }
            // Also reject self-loops.
            if (!isChild && child != nodes[i].getValue()) {
                children[i * maxEdgesPerNode + nodes[i].getNumChildren()] = child;
                nodes[i].addChild(&nodes[child]);
            }
        }
    }
    return children;
}
// Allocates nNodes nodes with ids 0..nNodes-1 and seeds rand() for the
// subsequent random edge generation. Caller owns the returned array.
// Fix: the original heap-allocated a temporary Node per element and leaked it
// after copying; assign a stack temporary instead.
Node* generateGraph(int nNodes) {
    srand((unsigned)time(0));
    Node* nodes = new Node[nNodes];
    for (int i = 0; i < nNodes; i++) {
        nodes[i] = Node(i);  // copy-assign from a temporary; nothing leaked
    }
    return nodes;
}
// Recursive level-building traversal used to construct `path`:
// path[depth] collects the nodes first reached at that depth.
// Marks nodes explored (1) when queued and finished (2) on exit.
// Fix: the `toExplore` array was leaked on every invocation; it is now
// released before leaving the branch that allocated it.
void exploreChild(Node* child, vector< vector<Node*> >* path, int depth, Node* nodes) {
    int numChildren = child->getNumChildren();
    if (numChildren > 0) {
        bool *toExplore = new bool[numChildren];
        vector<Node*> newPath;
        // Create the level bucket on first arrival at this depth.
        if (path->size() <= (size_t)depth) {
            path->push_back(newPath);
        }
        vector<Node*>* currentPath = &(path->at(depth));
        for (int i = 0; i < numChildren; i++) {
            Node* newChild = &nodes[child->getChildren()[i]];
            if (newChild->getExplored() == 0) {
                currentPath->push_back(newChild);
                newChild->setExplored(1);
                toExplore[i] = true;
            } else {
                toExplore[i] = false;
            }
        }
        // Explore loop after push loop so it is actually BFS
        for (int i = 0; i < numChildren; i++) {
            Node* newChild = &nodes[child->getChildren()[i]];
            if (toExplore[i]) {
                exploreChild(newChild, path, depth + 1, nodes);
            }
        }
        delete[] toExplore;  // was leaked in the original
    }
    child->setExplored(2);
    return;
}
// Synchronous CPU reference BFS from node 0.
// Returns a new int[size] with each node's BFS depth (-1 = unreached).
// Side effect: sets visited nodes' explored flag to 1 via setExplored().
int* bfs(Node* nodes, int size) {
int* cost = new int[size];
for (int i = 0; i < size; i++) {
cost[i] = -1;
}
Node* currentNode = &nodes[0];
queue<Node*> wave;
wave.push(currentNode);
cost[0] = 0;
int depth = 0;
// Outer loop advances one depth level at a time; the inner loop drains all
// queued nodes sharing the current depth before moving deeper.
while (!wave.empty()) {
depth = cost[wave.front()->getValue()];
while (!wave.empty() && depth == cost[wave.front()->getValue()]) {
currentNode = wave.front();
wave.pop();
currentNode->setExplored(1);
if (currentNode->getNumChildren() > 0) {
int *children = currentNode->getChildren();
for (int i = 0; i < currentNode->getNumChildren(); i++) {
// Each unexplored child is stamped and enqueued exactly once.
if (nodes[children[i]].getExplored() == 0) {
nodes[children[i]].setExplored(1);
cost[children[i]] = depth + 1;
wave.push(&nodes[children[i]]);
}
}
}
}
}
return cost;
}
// Flattens the level list produced by exploreChild() into a per-node depth
// array: result[v] holds node v's BFS level, or -1 when v was never reached.
int* transformBfs(vector< vector<Node*> > path, int size) {
    int *depths = new int[size];
    for (int v = 0; v < size; ++v) {
        depths[v] = -1;
    }
    for (size_t level = 0; level < path.size(); ++level) {
        for (size_t j = 0; j < path[level].size(); ++j) {
            depths[path[level][j]->getValue()] = (int)level;
        }
    }
    return depths;
}
// Extracts each node's child count into a plain int array of length `size`
// suitable for copying to the device. Caller owns the returned array.
int* transformNumChildren(Node* nodes, int size) {
    int *counts = new int[size];
    for (int v = 0; v < size; ++v) {
        counts[v] = nodes[v].getNumChildren();
    }
    return counts;
}
// Builds the CSR-style offset array for the reverse (parent) edge list:
// result[v] .. result[v+1] delimit node v's parents, and result[size] is
// the total edge count.
// Fix: the zero-initialization loop stopped at `size`, leaving result[size]
// uninitialized even though the counting pass increments result[child + 1]
// (which can be result[size]) and the prefix sum — and main()'s numEdges —
// read it; the reported edge total was garbage-dependent.
int* transformParentPtr(Node* nodes, int size) {
    int *result = new int[size + 1];
    for (int i = 0; i < size + 1; i++) {  // zero ALL size+1 counters
        result[i] = 0;
    }
    // Count incoming edges per node, shifted by one slot for the prefix sum.
    for (int i = 0; i < size; i++) {
        Node *node = &nodes[i];
        if (node->getNumChildren() > 0) {
            int *children = node->getChildren();
            for (int j = 0; j < node->getNumChildren(); j++) {
                int child = children[j];
                result[child + 1] += 1;
            }
        }
    }
    // Inclusive prefix sum converts counts into offsets.
    for (int i = 1; i < size + 1; i++) {
        result[i] = result[i] + result[i - 1];
    }
    return result;
}
// Fills the reverse edge list matching the offsets from transformParentPtr():
// for each child c, result[parentPtr[c] .. parentPtr[c+1]) lists c's parents.
// Fix: the `curIdx` cursor array was leaked; it is freed before returning.
int* transformParents(Node* nodes, int size, int* parentPtr) {
    int numEdges = parentPtr[size];
    int *result = new int[numEdges];
    int *curIdx = new int[size];  // next free slot per child
    for (int i = 0; i < size; i++) {
        curIdx[i] = parentPtr[i];
    }
    for (int i = 0; i < size; i++) {
        Node *node = &nodes[i];
        if (node->getNumChildren() > 0) {
            int *children = node->getChildren();
            for (int j = 0; j < node->getNumChildren(); j++) {
                int child = children[j];
                result[curIdx[child]] = i;  // i is a parent of child
                curIdx[child] = curIdx[child] + 1;
            }
        }
    }
    delete[] curIdx;  // was leaked in the original
    return result;
}
// Host driver for direction-optimizing BFS: starts with the top-down
// child-list kernel and switches to the bottom-up parent-list kernel once
// most vertices are finished. Verifies against the CPU result `synchResult`.
// Fixes vs. the original:
//  * nextWaveMask[0] was never initialized (the init loop started at 1) and
//    its garbage was copied into d_nextWaveMask[0] on every iteration;
//  * the switch-over threshold used integer division ((k*k-1)/(k*k) == 0),
//    so the top-down branch was never taken; it is now computed in float;
//  * `completed` accumulated across iterations, re-counting finished
//    vertices every pass; it is now recomputed per iteration;
//  * device buffers, events, and host scratch arrays were leaked.
void callFlipFlopParent(int *d_size, int *d_children, int *d_numChildren, int *d_maxChildren, int *d_parent, int *d_parentPtr, int size, int maxChildren, int *synchResult) {
    hipEvent_t start;
    hipEventCreate(&start);
    hipEvent_t stop;
    hipEventCreate(&stop);
    int *d_cost, *d_waveMask, *d_nextWaveMask;
    // Allocate space for device copies
    hipMalloc((void **)&d_cost, size * sizeof(int));
    hipMalloc((void **)&d_waveMask, size * sizeof(int));
    hipMalloc((void **)&d_nextWaveMask, size * sizeof(int));
    int gridSz = ceil(((float) size) / TBS);
    int *waveMask = new int[size];
    int *nextWaveMask = new int[size];
    int *cost = new int[size];
    for (int i = 0; i < size; i++) {  // start at 0 so element 0 is initialized too
        cost[i] = -1;
        waveMask[i] = 0;
        nextWaveMask[i] = 0;
    }
    cost[0] = 0;      // source vertex has depth 0
    waveMask[0] = 1;  // and begins on the frontier
    hipMemcpy(d_cost, cost, size * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_waveMask, waveMask, size * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_nextWaveMask, nextWaveMask, size * sizeof(int), hipMemcpyHostToDevice);
    // Record the start event
    hipEventRecord(start, NULL);
    // Number of finished vertices above which the bottom-up kernel is used.
    float switchThreshold = (float)(maxChildren * maxChildren - 1) / (maxChildren * maxChildren) * size;
    bool complete = false;
    int completed = 0;
    while (!complete) {
        // Launch kernel on GPU: top-down while the frontier is cheap,
        // bottom-up once most of the graph is done.
        if (completed < switchThreshold) {
            childListExploreWave<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_children, d_numChildren, d_cost, d_size, d_maxChildren);
        } else {
            parentListBackwardsWave<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_parent, d_parentPtr, d_cost, d_size);
        }
        hipDeviceSynchronize();
        setPreviousExplored<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_size);
        hipDeviceSynchronize();
        // Promote the next wave and reset the device scratch mask to zeros.
        hipMemcpy(d_waveMask, d_nextWaveMask, size * sizeof(int), hipMemcpyDeviceToDevice);
        hipMemcpy(d_nextWaveMask, nextWaveMask, size * sizeof(int), hipMemcpyHostToDevice);
        complete = true;
        completed = 0;  // recount finished vertices from scratch each pass
        hipMemcpy(waveMask, d_waveMask, size * sizeof(int), hipMemcpyDeviceToHost);
        for (int i = 0; i < size; i++) {
            if (waveMask[i] == 1) {
                complete = false;  // frontier non-empty: keep iterating
            } else if (waveMask[i] == 2) {
                completed += 1;
            }
        }
    }
    // Make sure result is finished
    hipDeviceSynchronize();
    // Record end event
    hipEventRecord(stop, NULL);
    hipEventSynchronize(stop);
    float msecTotal = 0.0f;
    hipEventElapsedTime(&msecTotal, start, stop);
    printf("GPU Parent Flip Flop Explore Time= %.3f msec\n", msecTotal);
    // Copy result back to host
    int *gpu_result = (int *) malloc(size * sizeof(int));
    hipMemcpy(gpu_result, d_cost, size * sizeof(int), hipMemcpyDeviceToHost);
    bool isCorrect = true;
    for (int i = 0; i < size; i++) {
        if (synchResult[i] != gpu_result[i]) {
            isCorrect = false;
            printf("%i CPU: %i GPU:%i\n", i, synchResult[i], gpu_result[i]);
        }
    }
    if (!isCorrect) {
        printf("The results do not match\n");
    } else {
        printf("The results match\n");
    }
    // Release device and host resources (leaked in the original).
    hipFree(d_cost);
    hipFree(d_waveMask);
    hipFree(d_nextWaveMask);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    delete[] waveMask;
    delete[] nextWaveMask;
    delete[] cost;
    free(gpu_result);
}
// Host driver for the flip-flop BFS variant whose bottom-up phase scans the
// dense child table (backwardsWave) instead of a parent list. Currently
// disabled in main(). Verifies against the CPU result `synchResult`.
// Fixes vs. the original (same set as callFlipFlopParent):
//  * nextWaveMask[0] was never initialized (init loop started at 1);
//  * the switch-over threshold used integer division ((k*k-1)/(k*k) == 0),
//    so the top-down branch was never taken; it is computed in float now;
//  * `completed` accumulated across iterations; it is recomputed per pass;
//  * device buffers, events, and host scratch arrays were leaked.
void callFlipFlopWaveExplore(int *d_size, int *d_children, int *d_numChildren, int size, int *d_maxChildren, int maxChildren, int *synchResult) {
    hipEvent_t start;
    hipEventCreate(&start);
    hipEvent_t stop;
    hipEventCreate(&stop);
    int *d_cost, *d_waveMask, *d_nextWaveMask;
    // Allocate space for device copies
    hipMalloc((void **)&d_cost, size * sizeof(int));
    hipMalloc((void **)&d_waveMask, size * sizeof(int));
    hipMalloc((void **)&d_nextWaveMask, size * sizeof(int));
    int gridSz = ceil(((float) size) / TBS);
    int *waveMask = new int[size];
    int *nextWaveMask = new int[size];
    int *cost = new int[size];
    for (int i = 0; i < size; i++) {  // start at 0 so element 0 is initialized too
        cost[i] = -1;
        waveMask[i] = 0;
        nextWaveMask[i] = 0;
    }
    cost[0] = 0;      // source vertex has depth 0
    waveMask[0] = 1;  // and begins on the frontier
    hipMemcpy(d_cost, cost, size * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_waveMask, waveMask, size * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_nextWaveMask, nextWaveMask, size * sizeof(int), hipMemcpyHostToDevice);
    // Record the start event
    hipEventRecord(start, NULL);
    // Number of finished vertices above which the bottom-up kernel is used.
    float switchThreshold = (float)(maxChildren * maxChildren - 1) / (maxChildren * maxChildren) * size;
    bool complete = false;
    int completed = 0;
    while (!complete) {
        // Launch kernel on GPU
        if (completed < switchThreshold) {
            childListExploreWave<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_children, d_numChildren, d_cost, d_size, d_maxChildren);
        } else {
            backwardsWave<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_children, d_numChildren, d_cost, d_size, d_maxChildren);
        }
        hipDeviceSynchronize();
        setPreviousExplored<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_size);
        hipDeviceSynchronize();
        // Promote the next wave and reset the device scratch mask to zeros.
        hipMemcpy(d_waveMask, d_nextWaveMask, size * sizeof(int), hipMemcpyDeviceToDevice);
        hipMemcpy(d_nextWaveMask, nextWaveMask, size * sizeof(int), hipMemcpyHostToDevice);
        complete = true;
        completed = 0;  // recount finished vertices from scratch each pass
        hipMemcpy(waveMask, d_waveMask, size * sizeof(int), hipMemcpyDeviceToHost);
        for (int i = 0; i < size; i++) {
            if (waveMask[i] == 1) {
                complete = false;  // frontier non-empty: keep iterating
            } else if (waveMask[i] == 2) {
                completed += 1;
            }
        }
    }
    // Make sure result is finished
    hipDeviceSynchronize();
    // Record end event
    hipEventRecord(stop, NULL);
    hipEventSynchronize(stop);
    float msecTotal = 0.0f;
    hipEventElapsedTime(&msecTotal, start, stop);
    printf("GPU Flip Flop Explore Time= %.3f msec\n", msecTotal);
    // Copy result back to host
    int *gpu_result = (int *) malloc(size * sizeof(int));
    hipMemcpy(gpu_result, d_cost, size * sizeof(int), hipMemcpyDeviceToHost);
    bool isCorrect = true;
    for (int i = 0; i < size; i++) {
        if (synchResult[i] != gpu_result[i]) {
            isCorrect = false;
            printf("%i CPU: %i GPU:%i\n", i, synchResult[i], gpu_result[i]);
        }
    }
    if (!isCorrect) {
        printf("The results do not match\n");
    } else {
        printf("The results match\n");
    }
    // Release device and host resources (leaked in the original).
    hipFree(d_cost);
    hipFree(d_waveMask);
    hipFree(d_nextWaveMask);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    delete[] waveMask;
    delete[] nextWaveMask;
    delete[] cost;
    free(gpu_result);
}
// Host driver for the pure top-down BFS using the child-list kernel.
// Iterates one level per pass until the frontier is empty, then checks the
// device result against the CPU reference `synchResult`.
// Fixes vs. the original:
//  * nextWaveMask[0] was never initialized (the init loop started at 1) and
//    its garbage was copied into d_nextWaveMask[0] on every iteration;
//  * device buffers, events, and host scratch arrays were leaked.
void callChildListExploreWave(int *d_size, int *d_children, int *d_numChildren, int size, int *d_maxChildren, int *synchResult) {
    hipEvent_t start;
    hipEventCreate(&start);
    hipEvent_t stop;
    hipEventCreate(&stop);
    int *d_cost, *d_waveMask, *d_nextWaveMask;
    // Allocate space for device copies
    hipMalloc((void **)&d_cost, size * sizeof(int));
    hipMalloc((void **)&d_waveMask, size * sizeof(int));
    hipMalloc((void **)&d_nextWaveMask, size * sizeof(int));
    int gridSz = ceil(((float) size) / TBS);
    int *waveMask = new int[size];
    int *nextWaveMask = new int[size];
    int *cost = new int[size];
    for (int i = 0; i < size; i++) {  // start at 0 so element 0 is initialized too
        cost[i] = -1;
        waveMask[i] = 0;
        nextWaveMask[i] = 0;
    }
    cost[0] = 0;      // source vertex has depth 0
    waveMask[0] = 1;  // and begins on the frontier
    hipMemcpy(d_cost, cost, size * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_waveMask, waveMask, size * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_nextWaveMask, nextWaveMask, size * sizeof(int), hipMemcpyHostToDevice);
    // Record the start event
    hipEventRecord(start, NULL);
    bool complete = false;
    while (!complete) {
        // Launch kernel on GPU: one BFS level per iteration.
        childListExploreWave<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_children, d_numChildren, d_cost, d_size, d_maxChildren);
        hipDeviceSynchronize();
        setPreviousExplored<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_size);
        hipDeviceSynchronize();
        // Promote the next wave and reset the device scratch mask to zeros.
        hipMemcpy(d_waveMask, d_nextWaveMask, size * sizeof(int), hipMemcpyDeviceToDevice);
        hipMemcpy(d_nextWaveMask, nextWaveMask, size * sizeof(int), hipMemcpyHostToDevice);
        complete = true;
        hipMemcpy(waveMask, d_waveMask, size * sizeof(int), hipMemcpyDeviceToHost);
        for (int i = 0 ; i < size; i++) {
            if (waveMask[i] == 1) {
                complete = false;  // frontier non-empty: keep iterating
            }
        }
    }
    // Make sure result is finished
    hipDeviceSynchronize();
    // Record end event
    hipEventRecord(stop, NULL);
    hipEventSynchronize(stop);
    float msecTotal = 0.0f;
    hipEventElapsedTime(&msecTotal, start, stop);
    printf("GPU Child List Explore Time= %.3f msec\n", msecTotal);
    // Copy result back to host
    int *gpu_result = (int *) malloc(size * sizeof(int));
    hipMemcpy(gpu_result, d_cost, size * sizeof(int), hipMemcpyDeviceToHost);
    bool isCorrect = true;
    for (int i = 0; i < size; i++) {
        if (synchResult[i] != gpu_result[i]) {
            isCorrect = false;
            printf("%i CPU: %i GPU:%i\n", i, synchResult[i], gpu_result[i]);
        }
    }
    if (!isCorrect) {
        printf("The results do not match\n");
    } else {
        printf("The results match\n");
    }
    // Release device and host resources (leaked in the original).
    hipFree(d_cost);
    hipFree(d_waveMask);
    hipFree(d_nextWaveMask);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    delete[] waveMask;
    delete[] nextWaveMask;
    delete[] cost;
    free(gpu_result);
}
// Host driver for the top-down BFS whose kernel reads child counts from the
// Node objects cached on the device (exploreWave). Verifies against the CPU
// reference `synchResult`.
// Fixes vs. the original:
//  * nextWaveMask[0] was never initialized (the init loop started at 1) and
//    its garbage was copied into d_nextWaveMask[0] on every iteration;
//  * device buffers, events, and host scratch arrays were leaked.
void callDeviceCachedVisitBFS(Node *d_graph, int *d_size, int *d_children, int size, int *d_maxChildren, int *synchResult) {
    hipEvent_t start;
    hipEventCreate(&start);
    hipEvent_t stop;
    hipEventCreate(&stop);
    int *d_cost, *d_waveMask, *d_nextWaveMask;
    // Allocate space for device copies
    hipMalloc((void **)&d_cost, size * sizeof(int));
    hipMalloc((void **)&d_waveMask, size * sizeof(int));
    hipMalloc((void **)&d_nextWaveMask, size * sizeof(int));
    int gridSz = ceil(((float) size) / TBS);
    int *waveMask = new int[size];
    int *nextWaveMask = new int[size];
    int *cost = new int[size];
    for (int i = 0; i < size; i++) {  // start at 0 so element 0 is initialized too
        cost[i] = -1;
        waveMask[i] = 0;
        nextWaveMask[i] = 0;
    }
    cost[0] = 0;      // source vertex has depth 0
    waveMask[0] = 1;  // and begins on the frontier
    hipMemcpy(d_cost, cost, size * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_waveMask, waveMask, size * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_nextWaveMask, nextWaveMask, size * sizeof(int), hipMemcpyHostToDevice);
    // Record the start event
    hipEventRecord(start, NULL);
    bool complete = false;
    while (!complete) {
        // Launch kernel on GPU: one BFS level per iteration.
        exploreWave<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_graph, d_children, d_cost, d_size, d_maxChildren);
        hipDeviceSynchronize();
        setPreviousExplored<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_size);
        hipDeviceSynchronize();
        // Promote the next wave and reset the device scratch mask to zeros.
        hipMemcpy(d_waveMask, d_nextWaveMask, size * sizeof(int), hipMemcpyDeviceToDevice);
        hipMemcpy(d_nextWaveMask, nextWaveMask, size * sizeof(int), hipMemcpyHostToDevice);
        complete = true;
        hipMemcpy(waveMask, d_waveMask, size * sizeof(int), hipMemcpyDeviceToHost);
        for (int i = 0 ; i < size; i++) {
            if (waveMask[i] == 1) {
                complete = false;  // frontier non-empty: keep iterating
            }
        }
    }
    // Make sure result is finished
    hipDeviceSynchronize();
    // Record end event
    hipEventRecord(stop, NULL);
    hipEventSynchronize(stop);
    float msecTotal = 0.0f;
    hipEventElapsedTime(&msecTotal, start, stop);
    printf("GPU Wave Time= %.3f msec\n", msecTotal);
    // Copy result back to host
    int *gpu_result = (int *) malloc(size * sizeof(int));
    hipMemcpy(gpu_result, d_cost, size * sizeof(int), hipMemcpyDeviceToHost);
    bool isCorrect = true;
    for (int i = 0; i < size; i++) {
        if (synchResult[i] != gpu_result[i]) {
            isCorrect = false;
            printf("%i CPU: %i GPU:%i\n", i, synchResult[i], gpu_result[i]);
        }
    }
    if (!isCorrect) {
        printf("The results do not match\n");
    } else {
        printf("The results match\n");
    }
    // Release device and host resources (leaked in the original).
    hipFree(d_cost);
    hipFree(d_waveMask);
    hipFree(d_nextWaveMask);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    delete[] waveMask;
    delete[] nextWaveMask;
    delete[] cost;
    free(gpu_result);
}
// Entry point. Usage: <prog> <numNodes> <maxEdgesPerNode>.
// Builds a random graph, runs the CPU reference BFS, then the GPU variants,
// comparing each GPU result against the CPU one.
// Fixes vs. the original: d_parent and d_parentPtr were never hipFree'd, and
// every host-side array (nodes, children, numChildren, parentPtr, parent,
// synchResult) was leaked; all are released before returning.
int main (int argc, char **argv) {
    if (argc != 3) {
        printf("\nToo few arguments!\n");
        abort();
    }
    // Get command line argument
    int size = atoi(argv[1]);
    int maxEdgesPerNode = atoi(argv[2]);
    // Build the random graph plus flattened device-friendly views of it.
    Node* nodes = generateGraph(size);
    int* children = generateChildren(nodes, size, maxEdgesPerNode);
    int* numChildren = transformNumChildren(nodes, size);
    int* parentPtr = transformParentPtr(nodes, size);  // reverse-edge CSR offsets
    int numEdges = parentPtr[size];
    int* parent = transformParents(nodes, size, parentPtr);
    Node* d_graph;
    int *d_children, *d_size, *d_maxChildren, *d_numChildren, *d_parent, *d_parentPtr;
    // Allocate space for device copies
    hipMalloc((void **)&d_graph, size * sizeof(Node));
    hipMalloc((void **)&d_size, sizeof(int));
    hipMalloc((void **)&d_maxChildren, sizeof(int));
    hipMalloc((void **)&d_children, size * maxEdgesPerNode * sizeof(int));
    hipMalloc((void **)&d_numChildren, size * sizeof(int));
    hipMalloc((void **)&d_parentPtr, (size + 1) * sizeof(int));
    hipMalloc((void **)&d_parent, numEdges * sizeof(int));
    // Copy inputs to device
    hipMemcpy(d_graph, nodes, size * sizeof(Node), hipMemcpyHostToDevice);
    hipMemcpy(d_size, &size, sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_maxChildren, &maxEdgesPerNode, sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_children, children, size * maxEdgesPerNode * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_numChildren, numChildren, size * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_parentPtr, parentPtr, (size + 1) * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_parent, parent, numEdges * sizeof(int), hipMemcpyHostToDevice);
    // Synchronous CPU reference BFS, timed with clock().
    clock_t start = clock();
    int *synchResult = bfs(nodes, size);
    clock_t end = clock();
    printf("CPU Time= %.3f msec\n", (end - start) / (double) (CLOCKS_PER_SEC / 1000));
    callDeviceCachedVisitBFS(d_graph, d_size, d_children, size, d_maxChildren, synchResult);
    callChildListExploreWave(d_size, d_children, d_numChildren, size, d_maxChildren, synchResult);
    //callFlipFlopWaveExplore(d_size, d_children, d_numChildren, size, d_maxChildren, maxEdgesPerNode, synchResult);
    callFlipFlopParent(d_size, d_children, d_numChildren, d_maxChildren, d_parent, d_parentPtr, size, maxEdgesPerNode, synchResult);
    // Cleanup — including d_parent/d_parentPtr and all host arrays, which the
    // original leaked.
    hipFree(d_graph);
    hipFree(d_size);
    hipFree(d_children);
    hipFree(d_numChildren);
    hipFree(d_maxChildren);
    hipFree(d_parent);
    hipFree(d_parentPtr);
    delete[] nodes;
    delete[] children;
    delete[] numChildren;
    delete[] parentPtr;
    delete[] parent;
    delete[] synchResult;
    return 0;
}
// Constructs a node with the given id and an empty child list.
// Fix: numChildren (and the children pointer) were left uninitialized — the
// count is never reset anywhere else, so addChild()/getNumChildren() read an
// indeterminate value, which is undefined behavior.
Node::Node(int newValue) {
    value = newValue;
    explored = 0;
    numChildren = 0;
    children = nullptr;  // storage is attached later by initializeChildren()
}
// Default constructor, required by `new Node[n]` in generateGraph().
// Fix: zero-initialize all members so array elements are in a defined state
// even before they are overwritten by assignment (the original left every
// member indeterminate).
Node::Node() {
    value = 0;
    explored = 0;
    numChildren = 0;
    children = nullptr;
}
// Returns the node's id; callable from both host and device code.
__host__ __device__ int Node::getValue() {
return value;
}
// Returns the raw child-id array; entries 0..getNumChildren()-1 are valid.
// Callable from both host and device code.
__host__ __device__ int* Node::getChildren() {
return children;
}
// Returns the number of children recorded so far via addChild().
// Callable from both host and device code.
__host__ __device__ int Node::getNumChildren() {
return numChildren;
}
// Records `child`'s id in the child list and bumps the count.
// NOTE(review): assumes initializeChildren() was called first and that the
// array has a free slot — no bounds check is performed; confirm callers
// never exceed the capacity passed to initializeChildren().
void Node::addChild(Node* child) {
children[numChildren] = child->getValue();
numChildren++;
return;
}
// Prints the node as "Value: <id> Children: [c0, c1, ...]" followed by a
// newline. Debugging aid only.
void Node::printNode() {
    printf("Value: %i Children: [", value);
    for (int i = 0; i < numChildren; i++) {
        if (i > 0) {
            printf(", ");  // separator before every entry except the first
        }
        printf("%i", children[i]);
    }
    printf("]\n");
    return;
}
// Allocates storage for up to numEdges child ids and resets the count.
// Fix: also resets numChildren to 0 — the original never reset the count
// anywhere, so addChild() incremented an indeterminate value.
// NOTE(review): calling this twice leaks the previous array; on a
// never-initialized node the old pointer may be garbage, so it cannot be
// safely deleted here.
void Node::initializeChildren(int numEdges) {
    children = new int[numEdges];
    numChildren = 0;
}
// Returns the exploration state (0 = unvisited, 1 = visited, 2 = finished,
// per this file's usage). Callable from both host and device code.
__host__ __device__ int Node::getExplored() {
return explored;
}
// Device-only atomic swap of the explored flag; returns the previous value,
// letting exactly one thread observe the transition.
__device__ int Node::parallelSetExplored(int newExplored) {
return atomicExch(&explored, newExplored);
}
// Host-side (non-atomic) setter for the explored flag, used by the CPU BFS.
void Node::setExplored(int newExplored) {
explored = newExplored;
return;
}
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z23parentListBackwardsWavePiS_S_S_S_S_
.globl _Z23parentListBackwardsWavePiS_S_S_S_S_
.p2align 8
.type _Z23parentListBackwardsWavePiS_S_S_S_S_,@function
_Z23parentListBackwardsWavePiS_S_S_S_S_:
s_load_b64 s[8:9], s[0:1], 0x28
v_lshl_add_u32 v0, s15, 9, v0
s_mov_b32 s3, exec_lo
s_waitcnt lgkmcnt(0)
s_load_b32 s2, s[8:9], 0x0
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v1, 31, v0
s_waitcnt lgkmcnt(0)
v_cmpx_gt_i32_e64 s2, v0
s_cbranch_execz .LBB0_9
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[0:1]
v_add_co_u32 v4, vcc_lo, s4, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v3, vcc_lo
global_load_b32 v4, v[4:5], off
s_waitcnt vmcnt(0)
v_cmp_eq_u32_e32 vcc_lo, 0, v4
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_9
s_load_b64 s[10:11], s[0:1], 0x18
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s10, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s11, v3, vcc_lo
global_load_b64 v[2:3], v[2:3], off
s_waitcnt vmcnt(0)
v_cmp_lt_i32_e32 vcc_lo, v2, v3
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_9
s_load_b64 s[10:11], s[0:1], 0x10
v_ashrrev_i32_e32 v5, 31, v2
v_mov_b32_e32 v4, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[4:5], 2, v[4:5]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v6, vcc_lo, s10, v4
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v7, vcc_lo, s11, v5, vcc_lo
s_mov_b32 s10, 0
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_5
.p2align 6
.LBB0_4:
s_or_b32 exec_lo, exec_lo, s14
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, exec_lo, s13
s_or_b32 s10, s2, s10
s_and_not1_b32 s2, s11, exec_lo
s_and_b32 s11, s12, exec_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 s11, s2, s11
s_and_not1_b32 exec_lo, exec_lo, s10
s_cbranch_execz .LBB0_7
.LBB0_5:
s_delay_alu instid0(VALU_DEP_1)
v_dual_mov_b32 v4, v6 :: v_dual_mov_b32 v5, v7
global_load_b32 v6, v[6:7], off
s_or_b32 s12, s12, exec_lo
s_or_b32 s13, s13, exec_lo
s_waitcnt vmcnt(0)
v_ashrrev_i32_e32 v7, 31, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[6:7], 2, v[6:7]
v_add_co_u32 v6, vcc_lo, s4, v6
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v7, vcc_lo, s5, v7, vcc_lo
global_load_b32 v6, v[6:7], off
s_waitcnt vmcnt(0)
v_cmp_ne_u32_e32 vcc_lo, 1, v6
s_and_saveexec_b32 s14, vcc_lo
s_cbranch_execz .LBB0_4
v_add_nc_u32_e32 v2, 1, v2
v_add_co_u32 v6, s2, v4, 4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_ci_u32_e64 v7, s2, 0, v5, s2
v_cmp_ge_i32_e32 vcc_lo, v2, v3
s_and_not1_b32 s2, s13, exec_lo
s_and_not1_b32 s12, s12, exec_lo
s_and_b32 s13, vcc_lo, exec_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 s13, s2, s13
s_branch .LBB0_4
.LBB0_7:
s_set_inst_prefetch_distance 0x2
s_or_b32 exec_lo, exec_lo, s10
s_and_saveexec_b32 s2, s11
s_delay_alu instid0(SALU_CYCLE_1)
s_xor_b32 s2, exec_lo, s2
s_cbranch_execz .LBB0_9
v_lshlrev_b64 v[2:3], 2, v[0:1]
v_dual_mov_b32 v6, 1 :: v_dual_mov_b32 v7, 0
s_load_b64 s[0:1], s[0:1], 0x20
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v8, vcc_lo, s6, v2
v_add_co_ci_u32_e32 v9, vcc_lo, s7, v3, vcc_lo
global_atomic_cmpswap_b32 v[8:9], v[6:7], off
global_load_b32 v4, v[4:5], off
s_waitcnt vmcnt(0)
v_ashrrev_i32_e32 v5, 31, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[4:5], 2, v[4:5]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v4, vcc_lo, s0, v4
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v5, vcc_lo
v_add_co_u32 v2, vcc_lo, s0, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo
global_load_b32 v4, v[4:5], off
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v4, 1, v4
global_store_b32 v[2:3], v4, off
.LBB0_9:
s_or_b32 exec_lo, exec_lo, s3
v_mov_b32_e32 v2, 0
s_mov_b32 s0, exec_lo
global_load_b32 v2, v2, s[8:9]
s_waitcnt vmcnt(0)
v_cmpx_lt_i32_e64 v0, v2
s_cbranch_execz .LBB0_12
v_lshlrev_b64 v[2:3], 2, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s4, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v3, vcc_lo
global_load_b32 v2, v[2:3], off
s_waitcnt vmcnt(0)
v_cmp_eq_u32_e32 vcc_lo, 2, v2
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_12
v_lshlrev_b64 v[0:1], 2, v[0:1]
v_mov_b32_e32 v2, 2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v0, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB0_12:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z23parentListBackwardsWavePiS_S_S_S_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 48
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 10
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z23parentListBackwardsWavePiS_S_S_S_S_, .Lfunc_end0-_Z23parentListBackwardsWavePiS_S_S_S_S_
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z13backwardsWavePiS_S_S_S_S_S_
.globl _Z13backwardsWavePiS_S_S_S_S_S_
.p2align 8
.type _Z13backwardsWavePiS_S_S_S_S_S_,@function
_Z13backwardsWavePiS_S_S_S_S_S_:
s_load_b64 s[2:3], s[0:1], 0x28
v_lshl_add_u32 v0, s15, 9, v0
s_mov_b32 s12, exec_lo
s_waitcnt lgkmcnt(0)
s_load_b32 s8, s[2:3], 0x0
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v1, 31, v0
s_waitcnt lgkmcnt(0)
v_cmpx_gt_i32_e64 s8, v0
s_cbranch_execz .LBB1_11
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[0:1]
v_add_co_u32 v2, vcc_lo, s4, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v3, vcc_lo
global_load_b32 v2, v[2:3], off
s_waitcnt vmcnt(0)
v_cmp_eq_u32_e32 vcc_lo, 0, v2
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB1_11
s_load_b64 s[10:11], s[0:1], 0x30
s_waitcnt lgkmcnt(0)
s_load_b32 s9, s[10:11], 0x0
s_waitcnt lgkmcnt(0)
s_mul_i32 s13, s9, s8
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_lt_i32 s13, 1
s_cbranch_scc1 .LBB1_11
s_ashr_i32 s14, s9, 31
s_mov_b32 s16, 0
s_add_i32 s9, s9, s14
s_mov_b32 s19, 0
s_xor_b32 s15, s9, s14
s_load_b64 s[8:9], s[0:1], 0x10
v_cvt_f32_u32_e32 v2, s15
s_sub_i32 s17, 0, s15
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_iflag_f32_e32 v2, v2
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v2, 0x4f7ffffe, v2
v_cvt_u32_f32_e32 v4, v2
s_branch .LBB1_5
.LBB1_4:
s_or_b32 exec_lo, exec_lo, s23
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
s_and_b32 s21, exec_lo, s22
v_dual_mov_b32 v2, s10 :: v_dual_mov_b32 v3, s11
s_or_b32 s16, s21, s16
s_and_not1_b32 s10, s18, exec_lo
s_and_b32 s11, s20, exec_lo
s_or_b32 s18, s10, s11
s_and_not1_b32 exec_lo, exec_lo, s16
s_cbranch_execz .LBB1_9
.LBB1_5:
s_waitcnt lgkmcnt(0)
s_load_b32 s10, s[8:9], 0x0
s_waitcnt lgkmcnt(0)
v_cmp_ne_u32_e64 s21, s10, v0
v_cmp_eq_u32_e32 vcc_lo, s10, v0
s_and_saveexec_b32 s22, vcc_lo
s_cbranch_execz .LBB1_7
v_readfirstlane_b32 s10, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_mul_i32 s11, s17, s10
s_mul_hi_u32 s11, s10, s11
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_add_i32 s10, s10, s11
s_mul_hi_u32 s10, s19, s10
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
s_mul_i32 s11, s10, s15
s_add_i32 s23, s10, 1
s_sub_i32 s11, s19, s11
s_sub_i32 s24, s11, s15
s_cmp_ge_u32 s11, s15
s_cselect_b32 s10, s23, s10
s_cselect_b32 s11, s24, s11
s_add_i32 s23, s10, 1
s_cmp_ge_u32 s11, s15
s_cselect_b32 s10, s23, s10
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_xor_b32 s10, s10, s14
s_sub_i32 s10, s10, s14
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_ashr_i32 s11, s10, 31
s_lshl_b64 s[24:25], s[10:11], 2
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s24, s4, s24
s_addc_u32 s25, s5, s25
s_load_b32 s23, s[24:25], 0x0
s_waitcnt lgkmcnt(0)
s_cmp_lg_u32 s23, 1
s_cselect_b32 s23, -1, 0
s_and_not1_b32 s21, s21, exec_lo
s_and_b32 s24, s23, exec_lo
s_mov_b32 s23, -1
s_or_b32 s21, s21, s24
.LBB1_7:
s_or_b32 exec_lo, exec_lo, s22
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 s20, s20, exec_lo
s_and_b32 s23, s23, exec_lo
s_mov_b32 s22, -1
s_or_b32 s20, s20, s23
s_and_saveexec_b32 s23, s21
s_cbranch_execz .LBB1_4
s_add_i32 s19, s19, 1
s_add_u32 s8, s8, 4
s_addc_u32 s9, s9, 0
s_cmp_eq_u32 s13, s19
s_cselect_b32 s21, -1, 0
s_and_not1_b32 s20, s20, exec_lo
s_or_not1_b32 s22, s21, exec_lo
s_branch .LBB1_4
.LBB1_9:
s_or_b32 exec_lo, exec_lo, s16
s_and_saveexec_b32 s8, s18
s_delay_alu instid0(SALU_CYCLE_1)
s_xor_b32 s8, exec_lo, s8
s_cbranch_execz .LBB1_11
s_load_b64 s[0:1], s[0:1], 0x20
v_lshlrev_b64 v[4:5], 2, v[0:1]
v_lshlrev_b64 v[2:3], 2, v[2:3]
v_dual_mov_b32 v6, 1 :: v_dual_mov_b32 v7, 0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v8, vcc_lo, s6, v4
v_add_co_ci_u32_e32 v9, vcc_lo, s7, v5, vcc_lo
global_atomic_cmpswap_b32 v[8:9], v[6:7], off
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s0, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo
global_load_b32 v2, v[2:3], off
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v6, 1, v2
v_add_co_u32 v2, vcc_lo, s0, v4
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v5, vcc_lo
global_store_b32 v[2:3], v6, off
.LBB1_11:
s_or_b32 exec_lo, exec_lo, s12
v_mov_b32_e32 v2, 0
s_mov_b32 s0, exec_lo
global_load_b32 v2, v2, s[2:3]
s_waitcnt vmcnt(0)
v_cmpx_lt_i32_e64 v0, v2
s_cbranch_execz .LBB1_14
v_lshlrev_b64 v[2:3], 2, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s4, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v3, vcc_lo
global_load_b32 v2, v[2:3], off
s_waitcnt vmcnt(0)
v_cmp_eq_u32_e32 vcc_lo, 2, v2
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB1_14
v_lshlrev_b64 v[0:1], 2, v[0:1]
v_mov_b32_e32 v2, 2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v0, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB1_14:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13backwardsWavePiS_S_S_S_S_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 56
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 10
.amdhsa_next_free_sgpr 26
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z13backwardsWavePiS_S_S_S_S_S_, .Lfunc_end1-_Z13backwardsWavePiS_S_S_S_S_S_
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z20childListExploreWavePiS_S_S_S_S_S_
.globl _Z20childListExploreWavePiS_S_S_S_S_S_
.p2align 8
.type _Z20childListExploreWavePiS_S_S_S_S_S_,@function
_Z20childListExploreWavePiS_S_S_S_S_S_:
s_load_b64 s[2:3], s[0:1], 0x28
v_lshl_add_u32 v0, s15, 9, v0
s_mov_b32 s14, exec_lo
s_waitcnt lgkmcnt(0)
s_load_b32 s8, s[2:3], 0x0
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v1, 31, v0
s_waitcnt lgkmcnt(0)
v_cmpx_gt_i32_e64 s8, v0
s_cbranch_execz .LBB2_7
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[0:1]
v_add_co_u32 v4, vcc_lo, s4, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v3, vcc_lo
global_load_b32 v4, v[4:5], off
s_waitcnt vmcnt(0)
v_cmp_eq_u32_e32 vcc_lo, 1, v4
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB2_7
s_load_b64 s[8:9], s[0:1], 0x18
s_mov_b32 s15, 0
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s8, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s9, v3, vcc_lo
global_load_b32 v8, v[2:3], off
s_waitcnt vmcnt(0)
v_cmp_lt_i32_e32 vcc_lo, 0, v8
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB2_7
s_clause 0x2
s_load_b64 s[8:9], s[0:1], 0x20
s_load_b64 s[10:11], s[0:1], 0x10
s_load_b64 s[0:1], s[0:1], 0x30
v_lshlrev_b64 v[4:5], 2, v[0:1]
v_dual_mov_b32 v9, 0 :: v_dual_mov_b32 v2, 1
v_mov_b32_e32 v3, 0
s_mov_b64 s[12:13], 0
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_3)
v_add_co_u32 v4, vcc_lo, s8, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s9, v5, vcc_lo
s_set_inst_prefetch_distance 0x1
s_branch .LBB2_5
.p2align 6
.LBB2_4:
s_or_b32 exec_lo, exec_lo, s16
s_add_u32 s12, s12, 1
s_addc_u32 s13, s13, 0
v_cmp_eq_u32_e32 vcc_lo, s12, v8
s_or_b32 s15, vcc_lo, s15
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s15
s_cbranch_execz .LBB2_7
.LBB2_5:
global_load_b32 v6, v9, s[0:1]
s_mov_b32 s16, exec_lo
s_waitcnt vmcnt(0)
v_mul_lo_u32 v6, v6, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v7, 31, v6
v_add_co_u32 v6, vcc_lo, s12, v6
v_add_co_ci_u32_e32 v7, vcc_lo, s13, v7, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[6:7], 2, v[6:7]
v_add_co_u32 v6, vcc_lo, s10, v6
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v7, vcc_lo, s11, v7, vcc_lo
global_load_b32 v6, v[6:7], off
s_waitcnt vmcnt(0)
v_ashrrev_i32_e32 v7, 31, v6
v_lshlrev_b64 v[6:7], 2, v[6:7]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v10, vcc_lo, s6, v6
v_add_co_ci_u32_e32 v11, vcc_lo, s7, v7, vcc_lo
v_add_co_u32 v12, vcc_lo, s4, v6
v_add_co_ci_u32_e32 v13, vcc_lo, s5, v7, vcc_lo
global_atomic_cmpswap_b32 v[10:11], v[2:3], off
global_load_b32 v10, v[12:13], off
s_waitcnt vmcnt(0)
v_cmpx_eq_u32_e32 0, v10
s_cbranch_execz .LBB2_4
global_load_b32 v10, v[4:5], off
v_add_co_u32 v6, vcc_lo, s8, v6
v_add_co_ci_u32_e32 v7, vcc_lo, s9, v7, vcc_lo
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v10, 1, v10
global_store_b32 v[6:7], v10, off
s_branch .LBB2_4
.LBB2_7:
s_set_inst_prefetch_distance 0x2
s_or_b32 exec_lo, exec_lo, s14
v_mov_b32_e32 v2, 0
s_mov_b32 s0, exec_lo
global_load_b32 v2, v2, s[2:3]
s_waitcnt vmcnt(0)
v_cmpx_lt_i32_e64 v0, v2
s_cbranch_execz .LBB2_10
v_lshlrev_b64 v[2:3], 2, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s4, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v3, vcc_lo
global_load_b32 v2, v[2:3], off
s_waitcnt vmcnt(0)
v_cmp_eq_u32_e32 vcc_lo, 2, v2
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB2_10
v_lshlrev_b64 v[0:1], 2, v[0:1]
v_mov_b32_e32 v2, 2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v0, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB2_10:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z20childListExploreWavePiS_S_S_S_S_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 56
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 14
.amdhsa_next_free_sgpr 17
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z20childListExploreWavePiS_S_S_S_S_S_, .Lfunc_end2-_Z20childListExploreWavePiS_S_S_S_S_S_
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z11exploreWavePiS_P4NodeS_S_S_S_
.globl _Z11exploreWavePiS_P4NodeS_S_S_S_
.p2align 8
.type _Z11exploreWavePiS_P4NodeS_S_S_S_,@function
_Z11exploreWavePiS_P4NodeS_S_S_S_:
s_load_b64 s[2:3], s[0:1], 0x28
v_lshl_add_u32 v0, s15, 9, v0
s_mov_b32 s14, exec_lo
s_waitcnt lgkmcnt(0)
s_load_b32 s8, s[2:3], 0x0
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v1, 31, v0
s_waitcnt lgkmcnt(0)
v_cmpx_gt_i32_e64 s8, v0
s_cbranch_execz .LBB3_7
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[0:1]
v_add_co_u32 v2, vcc_lo, s4, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v3, vcc_lo
global_load_b32 v2, v[2:3], off
s_waitcnt vmcnt(0)
v_cmp_eq_u32_e32 vcc_lo, 1, v2
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB3_7
s_load_b64 s[8:9], s[0:1], 0x10
s_mov_b32 s15, 0
s_waitcnt lgkmcnt(0)
v_mad_i64_i32 v[2:3], null, v0, 24, s[8:9]
global_load_b32 v8, v[2:3], off offset:16
s_waitcnt vmcnt(0)
v_cmp_lt_i32_e32 vcc_lo, 0, v8
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB3_7
s_clause 0x1
s_load_b128 s[8:11], s[0:1], 0x18
s_load_b64 s[0:1], s[0:1], 0x30
v_lshlrev_b64 v[4:5], 2, v[0:1]
v_dual_mov_b32 v9, 0 :: v_dual_mov_b32 v2, 1
v_mov_b32_e32 v3, 0
s_mov_b64 s[12:13], 0
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_3)
v_add_co_u32 v4, vcc_lo, s10, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s11, v5, vcc_lo
s_set_inst_prefetch_distance 0x1
s_branch .LBB3_5
.p2align 6
.LBB3_4:
s_or_b32 exec_lo, exec_lo, s16
s_add_u32 s12, s12, 1
s_addc_u32 s13, s13, 0
v_cmp_eq_u32_e32 vcc_lo, s12, v8
s_or_b32 s15, vcc_lo, s15
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s15
s_cbranch_execz .LBB3_7
.LBB3_5:
global_load_b32 v6, v9, s[0:1]
s_mov_b32 s16, exec_lo
s_waitcnt vmcnt(0)
v_mul_lo_u32 v6, v6, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v7, 31, v6
v_add_co_u32 v6, vcc_lo, s12, v6
v_add_co_ci_u32_e32 v7, vcc_lo, s13, v7, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[6:7], 2, v[6:7]
v_add_co_u32 v6, vcc_lo, s8, v6
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v7, vcc_lo, s9, v7, vcc_lo
global_load_b32 v6, v[6:7], off
s_waitcnt vmcnt(0)
v_ashrrev_i32_e32 v7, 31, v6
v_lshlrev_b64 v[6:7], 2, v[6:7]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v10, vcc_lo, s6, v6
v_add_co_ci_u32_e32 v11, vcc_lo, s7, v7, vcc_lo
v_add_co_u32 v12, vcc_lo, s4, v6
v_add_co_ci_u32_e32 v13, vcc_lo, s5, v7, vcc_lo
global_atomic_cmpswap_b32 v[10:11], v[2:3], off
global_load_b32 v10, v[12:13], off
s_waitcnt vmcnt(0)
v_cmpx_eq_u32_e32 0, v10
s_cbranch_execz .LBB3_4
global_load_b32 v10, v[4:5], off
v_add_co_u32 v6, vcc_lo, s10, v6
v_add_co_ci_u32_e32 v7, vcc_lo, s11, v7, vcc_lo
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v10, 1, v10
global_store_b32 v[6:7], v10, off
s_branch .LBB3_4
.LBB3_7:
s_set_inst_prefetch_distance 0x2
s_or_b32 exec_lo, exec_lo, s14
v_mov_b32_e32 v2, 0
s_mov_b32 s0, exec_lo
global_load_b32 v2, v2, s[2:3]
s_waitcnt vmcnt(0)
v_cmpx_lt_i32_e64 v0, v2
s_cbranch_execz .LBB3_10
v_lshlrev_b64 v[2:3], 2, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s4, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v3, vcc_lo
global_load_b32 v2, v[2:3], off
s_waitcnt vmcnt(0)
v_cmp_eq_u32_e32 vcc_lo, 2, v2
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB3_10
v_lshlrev_b64 v[0:1], 2, v[0:1]
v_mov_b32_e32 v2, 2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v0, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB3_10:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z11exploreWavePiS_P4NodeS_S_S_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 56
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 14
.amdhsa_next_free_sgpr 17
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end3:
.size _Z11exploreWavePiS_P4NodeS_S_S_S_, .Lfunc_end3-_Z11exploreWavePiS_P4NodeS_S_S_S_
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z19setPreviousExploredPiS_S_
.globl _Z19setPreviousExploredPiS_S_
.p2align 8
.type _Z19setPreviousExploredPiS_S_,@function
_Z19setPreviousExploredPiS_S_:
s_load_b64 s[2:3], s[0:1], 0x10
v_lshl_add_u32 v0, s15, 9, v0
s_waitcnt lgkmcnt(0)
s_load_b32 s2, s[2:3], 0x0
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1)
v_cmp_gt_i32_e32 vcc_lo, s2, v0
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB4_3
s_load_b64 s[2:3], s[0:1], 0x0
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[0:1]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s2, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo
global_load_b32 v2, v[2:3], off
s_waitcnt vmcnt(0)
v_cmp_eq_u32_e32 vcc_lo, 1, v2
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB4_3
s_load_b64 s[0:1], s[0:1], 0x8
v_lshlrev_b64 v[0:1], 2, v[0:1]
v_mov_b32_e32 v2, 2
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB4_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z19setPreviousExploredPiS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end4:
.size _Z19setPreviousExploredPiS_S_, .Lfunc_end4-_Z19setPreviousExploredPiS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 48
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z23parentListBackwardsWavePiS_S_S_S_S_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z23parentListBackwardsWavePiS_S_S_S_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 10
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 48
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 56
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13backwardsWavePiS_S_S_S_S_S_
.private_segment_fixed_size: 0
.sgpr_count: 28
.sgpr_spill_count: 0
.symbol: _Z13backwardsWavePiS_S_S_S_S_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 10
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 48
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 56
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z20childListExploreWavePiS_S_S_S_S_S_
.private_segment_fixed_size: 0
.sgpr_count: 19
.sgpr_spill_count: 0
.symbol: _Z20childListExploreWavePiS_S_S_S_S_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 14
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 48
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 56
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z11exploreWavePiS_P4NodeS_S_S_S_
.private_segment_fixed_size: 0
.sgpr_count: 19
.sgpr_spill_count: 0
.symbol: _Z11exploreWavePiS_P4NodeS_S_S_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 14
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z19setPreviousExploredPiS_S_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z19setPreviousExploredPiS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include <stdlib.h>
#define DEBUG
__global__ void convol2D (float *a, float *h, float *c, int a_rows, int a_cols, int h_rows, int h_cols)
{
//Calculating indices along x and y directions
int index_x = blockIdx.x * blockDim.x + threadIdx.x;
int index_y = blockIdx.y * blockDim.y + threadIdx.y;
//Calculating Grid width in terms of number of threads
int grid_width = gridDim.x * blockDim.x;
int sum = 0;
for(int i = 0; i<h_rows; i++)
{
for(int j = 0; j<h_cols;j++)
{
if((index_y-i)>=0 & (index_y-i)<=(a_rows-1) & (index_x-j)>=0 & (index_x-j)<=(a_cols-1))
sum = sum + (a[(index_y-i)*a_cols+(index_x-j)] * h[i*h_cols + j]);
}
}
c[index_y*grid_width + index_x] = sum;
}
int main (int argc, char *argv[])
{
if(argc != 2)
{
fprintf(stderr,"Incorrect arguments passed.\nUse \"./2dconvol.o <input_file>\"\n");
exit(1);
}
FILE *f;
int a_cols = 1;
int h_cols = 1;
int a_rows = 0;
int h_rows = 0;
int c_cols = 0;
int c_rows = 0;
float *a_h = 0;
float *a_d = 0;
float *hinv_h = 0;
float *hinv_d = 0;
float *c_h = 0;
float *c_d = 0;
size_t a_size = 0;
size_t h_size = 0;
size_t c_size = 0;
dim3 block_size;
dim3 grid_size;
int i=0,j=0;
char junk,junk_old;
//Opening File
f = fopen(argv[1],"r");
//First pass to find out size of the matrices
junk = fgetc(f);
while (junk != EOF)
{
if(junk == '\n')
{
a_rows++;
}
else if(junk == 0x20 & a_rows == 0)
{
a_cols++;
}
junk_old = junk;
junk = fgetc(f);
if(junk == '\n' & junk == junk_old)
{
break;
}
}
junk = fgetc(f);
while (junk != EOF)
{
if(junk == '\n')
{
h_rows++;
}
else if(junk == 0x20 & h_rows == 0)
{
h_cols++;
}
junk = fgetc(f);
}
//Calculating op dimensions
c_rows = a_rows + h_rows - 1;
block_size.y = c_rows > 32 ? 32 : c_rows;
c_cols = a_cols + h_cols - 1;
block_size.x = c_cols > 16 ? 16 : c_cols;
grid_size.y = (c_rows/32)+1;
grid_size.x = (c_cols/16)+1;
#ifdef DEBUG
printf("Size of A: %dx%d\n",a_rows,a_cols);
printf("Size of H: %dx%d\n",h_rows,h_cols);
printf("Size of C: %dx%d\n",c_rows,c_cols);
printf("Size of grid: %dx%d\n",grid_size.y,grid_size.x);
printf("Size of block: %dx%d\n",block_size.y,block_size.x);
#endif
//Calculating the sizes of all the involved matrices
a_size = a_rows * a_cols *sizeof(float);
h_size = h_rows * h_cols *sizeof(float);
c_size = c_rows * c_cols *sizeof(float);
//Allocating memory on host
a_h = (float *) malloc(a_size);
hinv_h = (float *) malloc(h_size);
c_h = (float *) malloc(c_size);
//Rewinding file to read the actual data
rewind(f);
//Reading all the data matrices
for(i = 0;i<a_rows;i++)
{
for (j = 0; j<a_cols;j++)
fscanf(f,"%f",&a_h[i*a_cols + j]);
}
for(i = 0 ; i<h_rows;i++)
{
for (j = 0; j<h_cols ;j++)
{
fscanf(f,"%f",&hinv_h[i*h_cols + j]);
}
}
#ifdef DEBUG
for(i = 0;i<a_rows;i++)
{
for (j = 0; j<a_cols;j++)
printf("%f ",a_h[i*a_cols + j]);
printf("\n");
}
for(i = 0;i<h_rows;i++)
{
for (j = 0; j<h_cols;j++)
{
printf("%f ",hinv_h[i*h_cols + j]);
}
printf("\n");
}
printf("Completed Loading Matrices...\n");
#endif
//cudaMalloc to allocate required matrices on the device
cudaMalloc((void **)&a_d,a_size);
cudaMalloc((void **)&hinv_d,h_size);
cudaMalloc((void **)&c_d,c_size);
//Copying input data from the Host to Device
cudaMemcpy(a_d,a_h,a_size,cudaMemcpyHostToDevice);
cudaMemcpy(hinv_d,hinv_h,h_size,cudaMemcpyHostToDevice);
//Setting Op matrix to all zeros
cudaMemset(c_d,0,c_size);
//Convolution function
convol2D<<<grid_size,block_size>>>(a_d,hinv_d,c_d,a_rows,a_cols,h_rows,h_cols);
//Synchronize to wait for the kernel to complete exectution
cudaThreadSynchronize();
//Copy the output matrix from the Device to host
cudaMemcpy(c_h,c_d,c_size,cudaMemcpyDeviceToHost);
//Print Output
for(i=0;i<c_rows;i++)
{
for(j=0;j<c_cols;j++)
{
printf("%f ",c_h[i*c_cols + j]);
}
printf("\n");
}
//Freeing all the allocated memory from the device
cudaFree(a_d);
cudaFree(hinv_d);
cudaFree(c_d);
//Freeing all the allocated memory from the host
free(a_h);
free(hinv_h);
free(c_h);
fclose(f);
return 0;
} | code for sm_80
Function : _Z8convol2DPfS_S_iiii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R4, RZ, RZ, 0x1 ; /* 0x00000001ff047424 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe20000000a00 */
/*0040*/ IMAD.MOV.U32 R6, RZ, RZ, RZ ; /* 0x000000ffff067224 */
/* 0x000fe200078e00ff */
/*0050*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0060*/ ISETP.LE.AND P0, PT, R4.reuse, c[0x0][0x184], PT ; /* 0x0000610004007a0c */
/* 0x040fe40003f03270 */
/*0070*/ S2R R7, SR_CTAID.Y ; /* 0x0000000000077919 */
/* 0x000e640000002600 */
/*0080*/ ISETP.GT.OR P0, PT, R4, c[0x0][0x180], !P0 ; /* 0x0000600004007a0c */
/* 0x000fc40004704670 */
/*0090*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e620000002200 */
/*00a0*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fe400078e0203 */
/*00b0*/ IMAD R7, R7, c[0x0][0x4], R2 ; /* 0x0000010007077a24 */
/* 0x002fd000078e0202 */
/*00c0*/ @P0 BRA 0x780 ; /* 0x000006b000000947 */
/* 0x000fea0003800000 */
/*00d0*/ IMAD.MOV.U32 R8, RZ, RZ, c[0x0][0x184] ; /* 0x00006100ff087624 */
/* 0x000fe200078e00ff */
/*00e0*/ HFMA2.MMA R6, -RZ, RZ, 0, 0 ; /* 0x00000000ff067435 */
/* 0x000fe200000001ff */
/*00f0*/ IMAD.MOV.U32 R9, RZ, RZ, 0x1 ; /* 0x00000001ff097424 */
/* 0x000fe400078e00ff */
/*0100*/ IMAD.MOV.U32 R10, RZ, RZ, RZ ; /* 0x000000ffff0a7224 */
/* 0x000fe200078e00ff */
/*0110*/ LOP3.LUT R8, R8, 0x3, RZ, 0xc0, !PT ; /* 0x0000000308087812 */
/* 0x000fe400078ec0ff */
/*0120*/ IADD3 R9, -R9, c[0x0][0x184], RZ ; /* 0x0000610009097a10 */
/* 0x000fe40007ffe1ff */
/*0130*/ IADD3 R11, -R8, c[0x0][0x184], RZ ; /* 0x00006100080b7a10 */
/* 0x000fe40007ffe1ff */
/*0140*/ ISETP.GE.U32.AND P1, PT, R9, 0x3, PT ; /* 0x000000030900780c */
/* 0x000fe20003f26070 */
/*0150*/ IMAD.IADD R12, R7, 0x1, -R10.reuse ; /* 0x00000001070c7824 */
/* 0x102fe200078e0a0a */
/*0160*/ ISETP.NE.AND P4, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fe20003f85270 */
/*0170*/ IMAD.MOV.U32 R14, RZ, RZ, R10 ; /* 0x000000ffff0e7224 */
/* 0x000fc400078e000a */
/*0180*/ IMAD.MOV.U32 R13, RZ, RZ, RZ ; /* 0x000000ffff0d7224 */
/* 0x000fe200078e00ff */
/*0190*/ ISETP.GE.AND P0, PT, R12, c[0x0][0x178], PT ; /* 0x00005e000c007a0c */
/* 0x000fc80003f06270 */
/*01a0*/ ISETP.GE.AND P0, PT, R7, R10, !P0 ; /* 0x0000000a0700720c */
/* 0x000fe40004706270 */
/*01b0*/ IADD3 R10, R10, 0x1, RZ ; /* 0x000000010a0a7810 */
/* 0x000fc80007ffe0ff */
/*01c0*/ ISETP.GE.AND P5, PT, R10, c[0x0][0x180], PT ; /* 0x000060000a007a0c */
/* 0x000fe20003fa6270 */
/*01d0*/ @!P1 BRA 0x4b0 ; /* 0x000002d000009947 */
/* 0x005fee0003800000 */
/*01e0*/ IMAD.MOV.U32 R13, RZ, RZ, RZ ; /* 0x000000ffff0d7224 */
/* 0x000fe200078e00ff */
/*01f0*/ MOV R15, R11 ; /* 0x0000000b000f7202 */
/* 0x000fc80000000f00 */
/*0200*/ IMAD.IADD R3, R0.reuse, 0x1, -R13.reuse ; /* 0x0000000100037824 */
/* 0x140fe200078e0a0d */
/*0210*/ ISETP.LT.OR P1, PT, R0, R13, !P0 ; /* 0x0000000d0000720c */
/* 0x000fe20004721670 */
/*0220*/ IMAD.MOV.U32 R2, RZ, RZ, 0x4 ; /* 0x00000004ff027424 */
/* 0x000fe400078e00ff */
/*0230*/ IMAD R4, R14, c[0x0][0x184], R13 ; /* 0x000061000e047a24 */
/* 0x000fe200078e020d */
/*0240*/ ISETP.GE.OR P1, PT, R3, c[0x0][0x17c], P1 ; /* 0x00005f0003007a0c */
/* 0x000fe20000f26670 */
/*0250*/ IMAD R3, R12, c[0x0][0x17c], R3 ; /* 0x00005f000c037a24 */
/* 0x000fe400078e0203 */
/*0260*/ IMAD.WIDE R4, R4, R2, c[0x0][0x168] ; /* 0x00005a0004047625 */
/* 0x000fc800078e0202 */
/*0270*/ IMAD.WIDE R2, R3, R2, c[0x0][0x160] ; /* 0x0000580003027625 */
/* 0x000fcc00078e0202 */
/*0280*/ @!P1 LDG.E R21, [R4.64] ; /* 0x0000000604159981 */
/* 0x000ea8000c1e1900 */
/*0290*/ @!P1 LDG.E R22, [R2.64] ; /* 0x0000000602169981 */
/* 0x000ea2000c1e1900 */
/*02a0*/ ISETP.LE.OR P2, PT, R0.reuse, R13, !P0 ; /* 0x0000000d0000720c */
/* 0x040fe40004743670 */
/*02b0*/ IADD3 R16, R0, -0x1, -R13 ; /* 0xffffffff00107810 */
/* 0x000fc80007ffe80d */
/*02c0*/ ISETP.GE.OR P2, PT, R16, c[0x0][0x17c], P2 ; /* 0x00005f0010007a0c */
/* 0x000fda0001746670 */
/*02d0*/ @!P2 LDG.E R16, [R4.64+0x4] ; /* 0x000004060410a981 */
/* 0x000ee8000c1e1900 */
/*02e0*/ @!P2 LDG.E R17, [R2.64+-0x4] ; /* 0xfffffc060211a981 */
/* 0x000ee2000c1e1900 */
/*02f0*/ IADD3 R19, R13, 0x2, RZ ; /* 0x000000020d137810 */
/* 0x000fc80007ffe0ff */
/*0300*/ ISETP.LT.OR P3, PT, R0.reuse, R19, !P0 ; /* 0x000000130000720c */
/* 0x040fe20004761670 */
/*0310*/ IMAD.IADD R19, R0, 0x1, -R19 ; /* 0x0000000100137824 */
/* 0x000fca00078e0a13 */
/*0320*/ ISETP.GE.OR P3, PT, R19, c[0x0][0x17c], P3 ; /* 0x00005f0013007a0c */
/* 0x000fda0001f66670 */
/*0330*/ @!P3 LDG.E R18, [R4.64+0x8] ; /* 0x000008060412b981 */
/* 0x000f28000c1e1900 */
/*0340*/ @!P3 LDG.E R19, [R2.64+-0x8] ; /* 0xfffff8060213b981 */
/* 0x000f22000c1e1900 */
/*0350*/ IADD3 R23, R13, 0x3, RZ ; /* 0x000000030d177810 */
/* 0x001fc80007ffe0ff */
/*0360*/ ISETP.LT.OR P6, PT, R0.reuse, R23, !P0 ; /* 0x000000170000720c */
/* 0x040fe200047c1670 */
/*0370*/ IMAD.IADD R23, R0, 0x1, -R23 ; /* 0x0000000100177824 */
/* 0x000fca00078e0a17 */
/*0380*/ ISETP.GE.OR P6, PT, R23, c[0x0][0x17c], P6 ; /* 0x00005f0017007a0c */
/* 0x000fda00037c6670 */
/*0390*/ @!P6 LDG.E R23, [R4.64+0xc] ; /* 0x00000c060417e981 */
/* 0x000168000c1e1900 */
/*03a0*/ @!P6 LDG.E R24, [R2.64+-0xc] ; /* 0xfffff4060218e981 */
/* 0x000f62000c1e1900 */
/*03b0*/ @!P1 I2F R20, R6 ; /* 0x0000000600149306 */
/* 0x002ea20000201400 */
/*03c0*/ IADD3 R15, R15, -0x4, RZ ; /* 0xfffffffc0f0f7810 */
/* 0x000fe40007ffe0ff */
/*03d0*/ IADD3 R13, R13, 0x4, RZ ; /* 0x000000040d0d7810 */
/* 0x000fe20007ffe0ff */
/*03e0*/ @!P1 FFMA R20, R21, R22, R20 ; /* 0x0000001615149223 */
/* 0x004fc80000000014 */
/*03f0*/ @!P1 F2I.TRUNC.NTZ R6, R20 ; /* 0x0000001400069305 */
/* 0x000e62000020f100 */
/*0400*/ ISETP.NE.AND P1, PT, R15, RZ, PT ; /* 0x000000ff0f00720c */
/* 0x000fce0003f25270 */
/*0410*/ @!P2 I2F R21, R6 ; /* 0x000000060015a306 */
/* 0x002ee40000201400 */
/*0420*/ @!P2 FFMA R16, R16, R17, R21 ; /* 0x000000111010a223 */
/* 0x008fcc0000000015 */
/*0430*/ @!P2 F2I.TRUNC.NTZ R6, R16 ; /* 0x000000100006a305 */
/* 0x000e70000020f100 */
/*0440*/ @!P3 I2F R17, R6 ; /* 0x000000060011b306 */
/* 0x002f240000201400 */
/*0450*/ @!P3 FFMA R18, R18, R19, R17 ; /* 0x000000131212b223 */
/* 0x010fcc0000000011 */
/*0460*/ @!P3 F2I.TRUNC.NTZ R6, R18 ; /* 0x000000120006b305 */
/* 0x000e30000020f100 */
/*0470*/ @!P6 I2F R4, R6 ; /* 0x000000060004e306 */
/* 0x001f640000201400 */
/*0480*/ @!P6 FFMA R23, R23, R24, R4 ; /* 0x000000181717e223 */
/* 0x020fcc0000000004 */
/*0490*/ @!P6 F2I.TRUNC.NTZ R6, R23 ; /* 0x000000170006e305 */
/* 0x000062000020f100 */
/*04a0*/ @P1 BRA 0x200 ; /* 0xfffffd5000001947 */
/* 0x000fea000383ffff */
/*04b0*/ @!P4 BRA 0x770 ; /* 0x000002b00000c947 */
/* 0x000fea0003800000 */
/*04c0*/ ISETP.LT.OR P1, PT, R0.reuse, R13.reuse, !P0 ; /* 0x0000000d0000720c */
/* 0x0c0fe20004721670 */
/*04d0*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */
/* 0x000fe200078e00ff */
/*04e0*/ IADD3 R3, R0, -R13, RZ ; /* 0x8000000d00037210 */
/* 0x000fe20007ffe0ff */
/*04f0*/ IMAD R2, R14, c[0x0][0x184], R13 ; /* 0x000061000e027a24 */
/* 0x000fe200078e020d */
/*0500*/ BSSY B0, 0x5d0 ; /* 0x000000c000007945 */
/* 0x000fe20003800000 */
/*0510*/ ISETP.NE.AND P2, PT, R8, 0x1, PT ; /* 0x000000010800780c */
/* 0x000fe40003f45270 */
/*0520*/ ISETP.GE.OR P1, PT, R3, c[0x0][0x17c], P1 ; /* 0x00005f0003007a0c */
/* 0x000fe20000f26670 */
/*0530*/ IMAD R4, R12, c[0x0][0x17c], R3 ; /* 0x00005f000c047a24 */
/* 0x000fe400078e0203 */
/*0540*/ IMAD.WIDE R2, R2, R5, c[0x0][0x168] ; /* 0x00005a0002027625 */
/* 0x000fc800078e0205 */
/*0550*/ IMAD.WIDE R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fcc00078e0205 */
/*0560*/ @P1 BRA 0x5c0 ; /* 0x0000005000001947 */
/* 0x000fea0003800000 */
/*0570*/ LDG.E R12, [R2.64] ; /* 0x00000006020c7981 */
/* 0x000ea8000c1e1900 */
/*0580*/ LDG.E R15, [R4.64] ; /* 0x00000006040f7981 */
/* 0x000ea2000c1e1900 */
/*0590*/ I2F R6, R6 ; /* 0x0000000600067306 */
/* 0x002ea40000201400 */
/*05a0*/ FFMA R12, R12, R15, R6 ; /* 0x0000000f0c0c7223 */
/* 0x004fcc0000000006 */
/*05b0*/ F2I.TRUNC.NTZ R6, R12 ; /* 0x0000000c00067305 */
/* 0x0002a4000020f100 */
/*05c0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*05d0*/ @!P2 BRA 0x770 ; /* 0x000001900000a947 */
/* 0x000fea0003800000 */
/*05e0*/ ISETP.LE.OR P1, PT, R0.reuse, R13, !P0 ; /* 0x0000000d0000720c */
/* 0x040fe20004723670 */
/*05f0*/ BSSY B0, 0x6a0 ; /* 0x000000a000007945 */
/* 0x000fe20003800000 */
/*0600*/ IADD3 R12, R0, -0x1, -R13 ; /* 0xffffffff000c7810 */
/* 0x002fe40007ffe80d */
/*0610*/ ISETP.NE.AND P2, PT, R8, 0x2, PT ; /* 0x000000020800780c */
/* 0x000fe40003f45270 */
/*0620*/ ISETP.GE.OR P1, PT, R12, c[0x0][0x17c], P1 ; /* 0x00005f000c007a0c */
/* 0x000fda0000f26670 */
/*0630*/ @P1 BRA 0x690 ; /* 0x0000005000001947 */
/* 0x000fea0003800000 */
/*0640*/ LDG.E R12, [R2.64+0x4] ; /* 0x00000406020c7981 */
/* 0x000ee8000c1e1900 */
/*0650*/ LDG.E R15, [R4.64+-0x4] ; /* 0xfffffc06040f7981 */
/* 0x000ee2000c1e1900 */
/*0660*/ I2F R6, R6 ; /* 0x0000000600067306 */
/* 0x004ee40000201400 */
/*0670*/ FFMA R12, R12, R15, R6 ; /* 0x0000000f0c0c7223 */
/* 0x008fcc0000000006 */
/*0680*/ F2I.TRUNC.NTZ R6, R12 ; /* 0x0000000c00067305 */
/* 0x0002a4000020f100 */
/*0690*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*06a0*/ @!P2 BRA 0x770 ; /* 0x000000c00000a947 */
/* 0x000fea0003800000 */
/*06b0*/ IADD3 R13, R13, 0x2, RZ ; /* 0x000000020d0d7810 */
/* 0x000fe20007ffe0ff */
/*06c0*/ BSSY B0, 0x770 ; /* 0x000000a000007945 */
/* 0x000fe60003800000 */
/*06d0*/ ISETP.LT.OR P0, PT, R0.reuse, R13, !P0 ; /* 0x0000000d0000720c */
/* 0x040fe20004701670 */
/*06e0*/ IMAD.IADD R13, R0, 0x1, -R13 ; /* 0x00000001000d7824 */
/* 0x000fca00078e0a0d */
/*06f0*/ ISETP.GE.OR P0, PT, R13, c[0x0][0x17c], P0 ; /* 0x00005f000d007a0c */
/* 0x000fda0000706670 */
/*0700*/ @P0 BRA 0x760 ; /* 0x0000005000000947 */
/* 0x000fea0003800000 */
/*0710*/ LDG.E R2, [R2.64+0x8] ; /* 0x0000080602027981 */
/* 0x000ee8000c1e1900 */
/*0720*/ LDG.E R5, [R4.64+-0x8] ; /* 0xfffff80604057981 */
/* 0x000ee2000c1e1900 */
/*0730*/ I2F R12, R6 ; /* 0x00000006000c7306 */
/* 0x006ee40000201400 */
/*0740*/ FFMA R12, R2, R5, R12 ; /* 0x00000005020c7223 */
/* 0x008fcc000000000c */
/*0750*/ F2I.TRUNC.NTZ R6, R12 ; /* 0x0000000c00067305 */
/* 0x0002a4000020f100 */
/*0760*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0770*/ @!P5 BRA 0x140 ; /* 0xfffff9c00000d947 */
/* 0x000fea000383ffff */
/*0780*/ ULDC UR4, c[0x0][0xc] ; /* 0x0000030000047ab9 */
/* 0x000fe20000000800 */
/*0790*/ I2F R5, R6 ; /* 0x0000000600057306 */
/* 0x006e620000201400 */
/*07a0*/ ULDC UR5, c[0x0][0x0] ; /* 0x0000000000057ab9 */
/* 0x000fe20000000800 */
/*07b0*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*07c0*/ UIMAD UR4, UR4, UR5, URZ ; /* 0x00000005040472a4 */
/* 0x000fcc000f8e023f */
/*07d0*/ IMAD R2, R7, UR4, R0 ; /* 0x0000000407027c24 */
/* 0x000fc8000f8e0200 */
/*07e0*/ IMAD.WIDE R2, R2, R3, c[0x0][0x170] ; /* 0x00005c0002027625 */
/* 0x000fca00078e0203 */
/*07f0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x002fe2000c101906 */
/*0800*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0810*/ BRA 0x810; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0820*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0830*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0840*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0850*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0860*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0870*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0880*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0890*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <stdlib.h>
#define DEBUG
__global__ void convol2D (float *a, float *h, float *c, int a_rows, int a_cols, int h_rows, int h_cols)
{
//Calculating indices along x and y directions
int index_x = blockIdx.x * blockDim.x + threadIdx.x;
int index_y = blockIdx.y * blockDim.y + threadIdx.y;
//Calculating Grid width in terms of number of threads
int grid_width = gridDim.x * blockDim.x;
int sum = 0;
for(int i = 0; i<h_rows; i++)
{
for(int j = 0; j<h_cols;j++)
{
if((index_y-i)>=0 & (index_y-i)<=(a_rows-1) & (index_x-j)>=0 & (index_x-j)<=(a_cols-1))
sum = sum + (a[(index_y-i)*a_cols+(index_x-j)] * h[i*h_cols + j]);
}
}
c[index_y*grid_width + index_x] = sum;
}
int main (int argc, char *argv[])
{
if(argc != 2)
{
fprintf(stderr,"Incorrect arguments passed.\nUse \"./2dconvol.o <input_file>\"\n");
exit(1);
}
FILE *f;
int a_cols = 1;
int h_cols = 1;
int a_rows = 0;
int h_rows = 0;
int c_cols = 0;
int c_rows = 0;
float *a_h = 0;
float *a_d = 0;
float *hinv_h = 0;
float *hinv_d = 0;
float *c_h = 0;
float *c_d = 0;
size_t a_size = 0;
size_t h_size = 0;
size_t c_size = 0;
dim3 block_size;
dim3 grid_size;
int i=0,j=0;
char junk,junk_old;
//Opening File
f = fopen(argv[1],"r");
//First pass to find out size of the matrices
junk = fgetc(f);
while (junk != EOF)
{
if(junk == '\n')
{
a_rows++;
}
else if(junk == 0x20 & a_rows == 0)
{
a_cols++;
}
junk_old = junk;
junk = fgetc(f);
if(junk == '\n' & junk == junk_old)
{
break;
}
}
junk = fgetc(f);
while (junk != EOF)
{
if(junk == '\n')
{
h_rows++;
}
else if(junk == 0x20 & h_rows == 0)
{
h_cols++;
}
junk = fgetc(f);
}
//Calculating op dimensions
c_rows = a_rows + h_rows - 1;
block_size.y = c_rows > 32 ? 32 : c_rows;
c_cols = a_cols + h_cols - 1;
block_size.x = c_cols > 16 ? 16 : c_cols;
grid_size.y = (c_rows/32)+1;
grid_size.x = (c_cols/16)+1;
#ifdef DEBUG
printf("Size of A: %dx%d\n",a_rows,a_cols);
printf("Size of H: %dx%d\n",h_rows,h_cols);
printf("Size of C: %dx%d\n",c_rows,c_cols);
printf("Size of grid: %dx%d\n",grid_size.y,grid_size.x);
printf("Size of block: %dx%d\n",block_size.y,block_size.x);
#endif
//Calculating the sizes of all the involved matrices
a_size = a_rows * a_cols *sizeof(float);
h_size = h_rows * h_cols *sizeof(float);
c_size = c_rows * c_cols *sizeof(float);
//Allocating memory on host
a_h = (float *) malloc(a_size);
hinv_h = (float *) malloc(h_size);
c_h = (float *) malloc(c_size);
//Rewinding file to read the actual data
rewind(f);
//Reading all the data matrices
for(i = 0;i<a_rows;i++)
{
for (j = 0; j<a_cols;j++)
fscanf(f,"%f",&a_h[i*a_cols + j]);
}
for(i = 0 ; i<h_rows;i++)
{
for (j = 0; j<h_cols ;j++)
{
fscanf(f,"%f",&hinv_h[i*h_cols + j]);
}
}
#ifdef DEBUG
for(i = 0;i<a_rows;i++)
{
for (j = 0; j<a_cols;j++)
printf("%f ",a_h[i*a_cols + j]);
printf("\n");
}
for(i = 0;i<h_rows;i++)
{
for (j = 0; j<h_cols;j++)
{
printf("%f ",hinv_h[i*h_cols + j]);
}
printf("\n");
}
printf("Completed Loading Matrices...\n");
#endif
//cudaMalloc to allocate required matrices on the device
cudaMalloc((void **)&a_d,a_size);
cudaMalloc((void **)&hinv_d,h_size);
cudaMalloc((void **)&c_d,c_size);
//Copying input data from the Host to Device
cudaMemcpy(a_d,a_h,a_size,cudaMemcpyHostToDevice);
cudaMemcpy(hinv_d,hinv_h,h_size,cudaMemcpyHostToDevice);
//Setting Op matrix to all zeros
cudaMemset(c_d,0,c_size);
//Convolution function
convol2D<<<grid_size,block_size>>>(a_d,hinv_d,c_d,a_rows,a_cols,h_rows,h_cols);
//Synchronize to wait for the kernel to complete exectution
cudaThreadSynchronize();
//Copy the output matrix from the Device to host
cudaMemcpy(c_h,c_d,c_size,cudaMemcpyDeviceToHost);
//Print Output
for(i=0;i<c_rows;i++)
{
for(j=0;j<c_cols;j++)
{
printf("%f ",c_h[i*c_cols + j]);
}
printf("\n");
}
//Freeing all the allocated memory from the device
cudaFree(a_d);
cudaFree(hinv_d);
cudaFree(c_d);
//Freeing all the allocated memory from the host
free(a_h);
free(hinv_h);
free(c_h);
fclose(f);
return 0;
} | .file "tmpxft_00022709_00000000-6_2dconvol.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z35__device_stub__Z8convol2DPfS_S_iiiiPfS_S_iiii
.type _Z35__device_stub__Z8convol2DPfS_S_iiiiPfS_S_iiii, @function
_Z35__device_stub__Z8convol2DPfS_S_iiiiPfS_S_iiii:
.LFB2082:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
leaq 192(%rsp), %rax
movq %rax, 160(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z8convol2DPfS_S_iiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z35__device_stub__Z8convol2DPfS_S_iiiiPfS_S_iiii, .-_Z35__device_stub__Z8convol2DPfS_S_iiiiPfS_S_iiii
.globl _Z8convol2DPfS_S_iiii
.type _Z8convol2DPfS_S_iiii, @function
_Z8convol2DPfS_S_iiii:
.LFB2083:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z35__device_stub__Z8convol2DPfS_S_iiiiPfS_S_iiii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z8convol2DPfS_S_iiii, .-_Z8convol2DPfS_S_iiii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "Incorrect arguments passed.\nUse \"./2dconvol.o <input_file>\"\n"
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "r"
.LC2:
.string "Size of A: %dx%d\n"
.LC3:
.string "Size of H: %dx%d\n"
.LC4:
.string "Size of C: %dx%d\n"
.LC5:
.string "Size of grid: %dx%d\n"
.LC6:
.string "Size of block: %dx%d\n"
.LC7:
.string "%f"
.LC8:
.string "%f "
.LC9:
.string "\n"
.section .rodata.str1.8
.align 8
.LC10:
.string "Completed Loading Matrices...\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $200, %rsp
.cfi_def_cfa_offset 256
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
cmpl $2, %edi
jne .L66
movq $0, 136(%rsp)
movq $0, 144(%rsp)
movq $0, 152(%rsp)
movl $1, 168(%rsp)
movl $1, 180(%rsp)
movq 8(%rsi), %rdi
leaq .LC1(%rip), %rsi
call fopen@PLT
movq %rax, %rbx
movq %rax, %rdi
call fgetc@PLT
movl %eax, %ebp
movl $0, %r12d
movl $1, %r13d
jmp .L13
.L66:
leaq .LC0(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L67:
addl $1, %r12d
jmp .L15
.L55:
movl %eax, %ebp
.L13:
cmpb $-1, %bpl
je .L16
cmpb $10, %bpl
je .L67
cmpb $32, %bpl
jne .L15
testl %r12d, %r12d
sete %al
cmpb $1, %al
sbbl $-1, %r13d
.L15:
movq %rbx, %rdi
call fgetc@PLT
cmpb $10, %al
jne .L55
cmpb %al, %bpl
jne .L55
.L16:
movl %r13d, 48(%rsp)
movl %r12d, 16(%rsp)
movq %rbx, %rdi
call fgetc@PLT
movl %eax, %edx
cmpb $-1, %al
je .L54
movl $0, %ebp
movl $1, %r12d
jmp .L22
.L69:
addl $1, %ebp
.L21:
movq %rbx, %rdi
call fgetc@PLT
movl %eax, %edx
cmpb $-1, %al
je .L68
.L22:
cmpb $10, %dl
je .L69
cmpb $32, %dl
jne .L21
testl %ebp, %ebp
sete %al
cmpb $1, %al
sbbl $-1, %r12d
jmp .L21
.L68:
movl %r12d, 4(%rsp)
movl %ebp, 8(%rsp)
.L19:
movl 16(%rsp), %r14d
movl 8(%rsp), %r13d
leal (%r14,%r13), %esi
movl %esi, 104(%rsp)
leal -1(%rsi), %ebp
movl $32, %eax
cmpl %eax, %ebp
cmovle %ebp, %eax
movl %eax, 64(%rsp)
movl 48(%rsp), %r12d
movl 4(%rsp), %ecx
leal (%r12,%rcx), %r10d
movl %r10d, 108(%rsp)
leal -1(%r10), %r15d
movl %r15d, 120(%rsp)
movl $16, %eax
cmpl %eax, %r15d
cmovle %r15d, %eax
movl %eax, 68(%rsp)
leal 30(%rsi), %eax
testl %ebp, %ebp
cmovns %ebp, %eax
sarl $5, %eax
leal 1(%rax), %esi
movl %esi, 72(%rsp)
leal 14(%r10), %eax
testl %r15d, %r15d
cmovns %r15d, %eax
sarl $4, %eax
leal 1(%rax), %r11d
movl %r11d, 76(%rsp)
movl %r12d, %ecx
movl %r14d, %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 4(%rsp), %ecx
movl %r13d, %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %r15d, %ecx
movl %ebp, %edx
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 76(%rsp), %ecx
movl 72(%rsp), %edx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 68(%rsp), %ecx
movl 64(%rsp), %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %r14d, %eax
imull %r12d, %eax
cltq
leaq 0(,%rax,4), %rdx
movq %rdx, 88(%rsp)
movl 4(%rsp), %ecx
imull %ecx, %r13d
movl %r13d, %eax
cltq
leaq 0(,%rax,4), %r13
movq %r13, 96(%rsp)
movl %r15d, %eax
imull %ebp, %eax
cltq
leaq 0(,%rax,4), %r15
movq %r15, 80(%rsp)
movq %rdx, %rdi
call malloc@PLT
movq %rax, 24(%rsp)
movq %r13, %rdi
call malloc@PLT
movq %rax, 32(%rsp)
movq %r15, %rdi
call malloc@PLT
movq %rax, 40(%rsp)
movq %rbx, %rdi
call rewind@PLT
testl %r14d, %r14d
jle .L23
movl %r12d, 52(%rsp)
movl $0, %r14d
movl $0, %r15d
leaq .LC7(%rip), %r13
movl %ebp, 56(%rsp)
jmp .L24
.L54:
movl $0, 8(%rsp)
movl $1, 4(%rsp)
jmp .L19
.L27:
movslq %r15d, %rsi
movq 24(%rsp), %rcx
leaq (%rcx,%rsi,4), %rbp
movslq 48(%rsp), %rax
addq %rsi, %rax
leaq (%rcx,%rax,4), %r12
.L25:
movq %rbp, %rdx
movq %r13, %rsi
movq %rbx, %rdi
movl $0, %eax
call __isoc23_fscanf@PLT
addq $4, %rbp
cmpq %r12, %rbp
jne .L25
.L28:
addl $1, %r14d
movl 52(%rsp), %eax
addl %eax, %r15d
cmpl %r14d, 16(%rsp)
je .L26
.L24:
cmpl $0, 48(%rsp)
jg .L27
jmp .L28
.L32:
movslq %r15d, %rsi
movq 32(%rsp), %rcx
leaq (%rcx,%rsi,4), %rbp
movslq 4(%rsp), %rax
addq %rsi, %rax
leaq (%rcx,%rax,4), %r12
.L30:
movq %rbp, %rdx
movq %r13, %rsi
movq %rbx, %rdi
movl $0, %eax
call __isoc23_fscanf@PLT
addq $4, %rbp
cmpq %r12, %rbp
jne .L30
.L33:
addl $1, %r14d
movl 52(%rsp), %eax
addl %eax, %r15d
movl 8(%rsp), %eax
cmpl %eax, %r14d
je .L31
.L29:
cmpl $0, 4(%rsp)
jg .L32
jmp .L33
.L31:
movl 56(%rsp), %ebp
cmpl $0, 16(%rsp)
jle .L34
.L52:
movl 48(%rsp), %eax
movl %eax, 52(%rsp)
movl $0, %r15d
movl $0, %r14d
cltq
movq %rax, 56(%rsp)
leaq .LC8(%rip), %r13
movq %rbx, 112(%rsp)
movl %ebp, 124(%rsp)
movq 24(%rsp), %r12
jmp .L35
.L38:
movslq %r15d, %rax
leaq (%r12,%rax,4), %rbx
movq 56(%rsp), %rcx
addq %rcx, %rax
leaq (%r12,%rax,4), %rbp
.L36:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %r13, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %rbp, %rbx
jne .L36
.L39:
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %r14d
movl 52(%rsp), %eax
addl %eax, %r15d
cmpl %r14d, 16(%rsp)
jle .L37
.L35:
cmpl $0, 48(%rsp)
jg .L38
jmp .L39
.L37:
movq 112(%rsp), %rbx
movl 124(%rsp), %ebp
cmpl $0, 8(%rsp)
jle .L40
.L34:
movl 4(%rsp), %eax
movl %eax, 52(%rsp)
movl $0, %r15d
movl $0, %r14d
cltq
movq %rax, 56(%rsp)
leaq .LC8(%rip), %r13
movq %rbx, 112(%rsp)
movl %ebp, 124(%rsp)
movq 32(%rsp), %r12
jmp .L41
.L43:
movslq %r15d, %rax
leaq (%r12,%rax,4), %rbx
movq 56(%rsp), %rcx
addq %rcx, %rax
leaq (%r12,%rax,4), %rbp
.L42:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %r13, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %rbp, %rbx
jne .L42
.L44:
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %r14d
movl 52(%rsp), %eax
addl %eax, %r15d
movl 8(%rsp), %eax
cmpl %eax, %r14d
je .L63
.L41:
cmpl $0, 4(%rsp)
jg .L43
jmp .L44
.L63:
movq 112(%rsp), %rbx
movl 124(%rsp), %ebp
.L40:
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 136(%rsp), %rdi
movq 88(%rsp), %r13
movq %r13, %rsi
call cudaMalloc@PLT
leaq 144(%rsp), %rdi
movq 96(%rsp), %r15
movq %r15, %rsi
call cudaMalloc@PLT
leaq 152(%rsp), %rdi
movq 80(%rsp), %r14
movq %r14, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %r13, %rdx
movq 24(%rsp), %rsi
movq 136(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %r15, %rdx
movq 32(%rsp), %rsi
movq 144(%rsp), %rdi
call cudaMemcpy@PLT
movq %r14, %rdx
movl $0, %esi
movq 152(%rsp), %rdi
call cudaMemset@PLT
movl 76(%rsp), %eax
movl %eax, 172(%rsp)
movl 72(%rsp), %eax
movl %eax, 176(%rsp)
movl 68(%rsp), %eax
movl %eax, 160(%rsp)
movl 64(%rsp), %eax
movl %eax, 164(%rsp)
movl 168(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 160(%rsp), %rdx
movq 172(%rsp), %rdi
movl 180(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L70
.L45:
call cudaThreadSynchronize@PLT
movl $2, %ecx
movq 80(%rsp), %rdx
movq 152(%rsp), %rsi
movq 40(%rsp), %r12
movq %r12, %rdi
call cudaMemcpy@PLT
testl %ebp, %ebp
jle .L46
movl 104(%rsp), %r15d
subl $1, %r15d
movl $0, %r14d
movl $0, %r13d
movl 108(%rsp), %eax
subl $2, %eax
movl %eax, 48(%rsp)
leaq 4(%r12), %rax
movq %rax, 8(%rsp)
leaq .LC8(%rip), %r12
movq %rbx, 16(%rsp)
movl %r15d, 4(%rsp)
movl 120(%rsp), %r15d
jmp .L47
.L70:
subq $8, %rsp
.cfi_def_cfa_offset 264
movl 12(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 272
movl 24(%rsp), %r9d
movl 64(%rsp), %r8d
movl 32(%rsp), %ecx
movq 168(%rsp), %rdx
movq 160(%rsp), %rsi
movq 152(%rsp), %rdi
call _Z35__device_stub__Z8convol2DPfS_S_iiiiPfS_S_iiii
addq $16, %rsp
.cfi_def_cfa_offset 256
jmp .L45
.L49:
movslq %r14d, %rdx
movq 40(%rsp), %rax
leaq (%rax,%rdx,4), %rbx
movl 48(%rsp), %eax
addq %rdx, %rax
movq 8(%rsp), %rcx
leaq (%rcx,%rax,4), %rbp
.L48:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %r12, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %rbp, %rbx
jne .L48
.L50:
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %r13d
addl %r15d, %r14d
movl 4(%rsp), %eax
cmpl %eax, %r13d
je .L64
.L47:
testl %r15d, %r15d
jg .L49
jmp .L50
.L64:
movq 16(%rsp), %rbx
.L46:
movq 136(%rsp), %rdi
call cudaFree@PLT
movq 144(%rsp), %rdi
call cudaFree@PLT
movq 152(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call free@PLT
movq 32(%rsp), %rdi
call free@PLT
movq 40(%rsp), %rdi
call free@PLT
movq %rbx, %rdi
call fclose@PLT
movq 184(%rsp), %rax
subq %fs:40, %rax
jne .L71
movl $0, %eax
addq $200, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L26:
.cfi_restore_state
movl 56(%rsp), %ebp
cmpl $0, 8(%rsp)
jle .L52
.L51:
movl 4(%rsp), %eax
movl %eax, 52(%rsp)
movl $0, %r14d
movl $0, %r15d
leaq .LC7(%rip), %r13
movl %ebp, 56(%rsp)
jmp .L29
.L23:
cmpl $0, 8(%rsp)
jg .L51
jmp .L40
.L71:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC11:
.string "_Z8convol2DPfS_S_iiii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC11(%rip), %rdx
movq %rdx, %rcx
leaq _Z8convol2DPfS_S_iiii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <stdlib.h>
#define DEBUG
__global__ void convol2D (float *a, float *h, float *c, int a_rows, int a_cols, int h_rows, int h_cols)
{
//Calculating indices along x and y directions
int index_x = blockIdx.x * blockDim.x + threadIdx.x;
int index_y = blockIdx.y * blockDim.y + threadIdx.y;
//Calculating Grid width in terms of number of threads
int grid_width = gridDim.x * blockDim.x;
int sum = 0;
for(int i = 0; i<h_rows; i++)
{
for(int j = 0; j<h_cols;j++)
{
if((index_y-i)>=0 & (index_y-i)<=(a_rows-1) & (index_x-j)>=0 & (index_x-j)<=(a_cols-1))
sum = sum + (a[(index_y-i)*a_cols+(index_x-j)] * h[i*h_cols + j]);
}
}
c[index_y*grid_width + index_x] = sum;
}
int main (int argc, char *argv[])
{
if(argc != 2)
{
fprintf(stderr,"Incorrect arguments passed.\nUse \"./2dconvol.o <input_file>\"\n");
exit(1);
}
FILE *f;
int a_cols = 1;
int h_cols = 1;
int a_rows = 0;
int h_rows = 0;
int c_cols = 0;
int c_rows = 0;
float *a_h = 0;
float *a_d = 0;
float *hinv_h = 0;
float *hinv_d = 0;
float *c_h = 0;
float *c_d = 0;
size_t a_size = 0;
size_t h_size = 0;
size_t c_size = 0;
dim3 block_size;
dim3 grid_size;
int i=0,j=0;
char junk,junk_old;
//Opening File
f = fopen(argv[1],"r");
//First pass to find out size of the matrices
junk = fgetc(f);
while (junk != EOF)
{
if(junk == '\n')
{
a_rows++;
}
else if(junk == 0x20 & a_rows == 0)
{
a_cols++;
}
junk_old = junk;
junk = fgetc(f);
if(junk == '\n' & junk == junk_old)
{
break;
}
}
junk = fgetc(f);
while (junk != EOF)
{
if(junk == '\n')
{
h_rows++;
}
else if(junk == 0x20 & h_rows == 0)
{
h_cols++;
}
junk = fgetc(f);
}
//Calculating op dimensions
c_rows = a_rows + h_rows - 1;
block_size.y = c_rows > 32 ? 32 : c_rows;
c_cols = a_cols + h_cols - 1;
block_size.x = c_cols > 16 ? 16 : c_cols;
grid_size.y = (c_rows/32)+1;
grid_size.x = (c_cols/16)+1;
#ifdef DEBUG
printf("Size of A: %dx%d\n",a_rows,a_cols);
printf("Size of H: %dx%d\n",h_rows,h_cols);
printf("Size of C: %dx%d\n",c_rows,c_cols);
printf("Size of grid: %dx%d\n",grid_size.y,grid_size.x);
printf("Size of block: %dx%d\n",block_size.y,block_size.x);
#endif
//Calculating the sizes of all the involved matrices
a_size = a_rows * a_cols *sizeof(float);
h_size = h_rows * h_cols *sizeof(float);
c_size = c_rows * c_cols *sizeof(float);
//Allocating memory on host
a_h = (float *) malloc(a_size);
hinv_h = (float *) malloc(h_size);
c_h = (float *) malloc(c_size);
//Rewinding file to read the actual data
rewind(f);
//Reading all the data matrices
for(i = 0;i<a_rows;i++)
{
for (j = 0; j<a_cols;j++)
fscanf(f,"%f",&a_h[i*a_cols + j]);
}
for(i = 0 ; i<h_rows;i++)
{
for (j = 0; j<h_cols ;j++)
{
fscanf(f,"%f",&hinv_h[i*h_cols + j]);
}
}
#ifdef DEBUG
for(i = 0;i<a_rows;i++)
{
for (j = 0; j<a_cols;j++)
printf("%f ",a_h[i*a_cols + j]);
printf("\n");
}
for(i = 0;i<h_rows;i++)
{
for (j = 0; j<h_cols;j++)
{
printf("%f ",hinv_h[i*h_cols + j]);
}
printf("\n");
}
printf("Completed Loading Matrices...\n");
#endif
//cudaMalloc to allocate required matrices on the device
cudaMalloc((void **)&a_d,a_size);
cudaMalloc((void **)&hinv_d,h_size);
cudaMalloc((void **)&c_d,c_size);
//Copying input data from the Host to Device
cudaMemcpy(a_d,a_h,a_size,cudaMemcpyHostToDevice);
cudaMemcpy(hinv_d,hinv_h,h_size,cudaMemcpyHostToDevice);
//Setting Op matrix to all zeros
cudaMemset(c_d,0,c_size);
//Convolution function
convol2D<<<grid_size,block_size>>>(a_d,hinv_d,c_d,a_rows,a_cols,h_rows,h_cols);
//Synchronize to wait for the kernel to complete exectution
cudaThreadSynchronize();
//Copy the output matrix from the Device to host
cudaMemcpy(c_h,c_d,c_size,cudaMemcpyDeviceToHost);
//Print Output
for(i=0;i<c_rows;i++)
{
for(j=0;j<c_cols;j++)
{
printf("%f ",c_h[i*c_cols + j]);
}
printf("\n");
}
//Freeing all the allocated memory from the device
cudaFree(a_d);
cudaFree(hinv_d);
cudaFree(c_d);
//Freeing all the allocated memory from the host
free(a_h);
free(hinv_h);
free(c_h);
fclose(f);
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define DEBUG
__global__ void convol2D (float *a, float *h, float *c, int a_rows, int a_cols, int h_rows, int h_cols)
{
//Calculating indices along x and y directions
int index_x = blockIdx.x * blockDim.x + threadIdx.x;
int index_y = blockIdx.y * blockDim.y + threadIdx.y;
//Calculating Grid width in terms of number of threads
int grid_width = gridDim.x * blockDim.x;
int sum = 0;
for(int i = 0; i<h_rows; i++)
{
for(int j = 0; j<h_cols;j++)
{
if((index_y-i)>=0 & (index_y-i)<=(a_rows-1) & (index_x-j)>=0 & (index_x-j)<=(a_cols-1))
sum = sum + (a[(index_y-i)*a_cols+(index_x-j)] * h[i*h_cols + j]);
}
}
c[index_y*grid_width + index_x] = sum;
}
int main (int argc, char *argv[])
{
if(argc != 2)
{
fprintf(stderr,"Incorrect arguments passed.\nUse \"./2dconvol.o <input_file>\"\n");
exit(1);
}
FILE *f;
int a_cols = 1;
int h_cols = 1;
int a_rows = 0;
int h_rows = 0;
int c_cols = 0;
int c_rows = 0;
float *a_h = 0;
float *a_d = 0;
float *hinv_h = 0;
float *hinv_d = 0;
float *c_h = 0;
float *c_d = 0;
size_t a_size = 0;
size_t h_size = 0;
size_t c_size = 0;
dim3 block_size;
dim3 grid_size;
int i=0,j=0;
char junk,junk_old;
//Opening File
f = fopen(argv[1],"r");
//First pass to find out size of the matrices
junk = fgetc(f);
while (junk != EOF)
{
if(junk == '\n')
{
a_rows++;
}
else if(junk == 0x20 & a_rows == 0)
{
a_cols++;
}
junk_old = junk;
junk = fgetc(f);
if(junk == '\n' & junk == junk_old)
{
break;
}
}
junk = fgetc(f);
while (junk != EOF)
{
if(junk == '\n')
{
h_rows++;
}
else if(junk == 0x20 & h_rows == 0)
{
h_cols++;
}
junk = fgetc(f);
}
//Calculating op dimensions
c_rows = a_rows + h_rows - 1;
block_size.y = c_rows > 32 ? 32 : c_rows;
c_cols = a_cols + h_cols - 1;
block_size.x = c_cols > 16 ? 16 : c_cols;
grid_size.y = (c_rows/32)+1;
grid_size.x = (c_cols/16)+1;
#ifdef DEBUG
printf("Size of A: %dx%d\n",a_rows,a_cols);
printf("Size of H: %dx%d\n",h_rows,h_cols);
printf("Size of C: %dx%d\n",c_rows,c_cols);
printf("Size of grid: %dx%d\n",grid_size.y,grid_size.x);
printf("Size of block: %dx%d\n",block_size.y,block_size.x);
#endif
//Calculating the sizes of all the involved matrices
a_size = a_rows * a_cols *sizeof(float);
h_size = h_rows * h_cols *sizeof(float);
c_size = c_rows * c_cols *sizeof(float);
//Allocating memory on host
a_h = (float *) malloc(a_size);
hinv_h = (float *) malloc(h_size);
c_h = (float *) malloc(c_size);
//Rewinding file to read the actual data
rewind(f);
//Reading all the data matrices
for(i = 0;i<a_rows;i++)
{
for (j = 0; j<a_cols;j++)
fscanf(f,"%f",&a_h[i*a_cols + j]);
}
for(i = 0 ; i<h_rows;i++)
{
for (j = 0; j<h_cols ;j++)
{
fscanf(f,"%f",&hinv_h[i*h_cols + j]);
}
}
#ifdef DEBUG
for(i = 0;i<a_rows;i++)
{
for (j = 0; j<a_cols;j++)
printf("%f ",a_h[i*a_cols + j]);
printf("\n");
}
for(i = 0;i<h_rows;i++)
{
for (j = 0; j<h_cols;j++)
{
printf("%f ",hinv_h[i*h_cols + j]);
}
printf("\n");
}
printf("Completed Loading Matrices...\n");
#endif
//cudaMalloc to allocate required matrices on the device
hipMalloc((void **)&a_d,a_size);
hipMalloc((void **)&hinv_d,h_size);
hipMalloc((void **)&c_d,c_size);
//Copying input data from the Host to Device
hipMemcpy(a_d,a_h,a_size,hipMemcpyHostToDevice);
hipMemcpy(hinv_d,hinv_h,h_size,hipMemcpyHostToDevice);
//Setting Op matrix to all zeros
hipMemset(c_d,0,c_size);
//Convolution function
convol2D<<<grid_size,block_size>>>(a_d,hinv_d,c_d,a_rows,a_cols,h_rows,h_cols);
//Synchronize to wait for the kernel to complete exectution
hipDeviceSynchronize();
//Copy the output matrix from the Device to host
hipMemcpy(c_h,c_d,c_size,hipMemcpyDeviceToHost);
//Print Output
for(i=0;i<c_rows;i++)
{
for(j=0;j<c_cols;j++)
{
printf("%f ",c_h[i*c_cols + j]);
}
printf("\n");
}
//Freeing all the allocated memory from the device
hipFree(a_d);
hipFree(hinv_d);
hipFree(c_d);
//Freeing all the allocated memory from the host
free(a_h);
free(hinv_h);
free(c_h);
fclose(f);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define DEBUG
__global__ void convol2D (float *a, float *h, float *c, int a_rows, int a_cols, int h_rows, int h_cols)
{
//Calculating indices along x and y directions
int index_x = blockIdx.x * blockDim.x + threadIdx.x;
int index_y = blockIdx.y * blockDim.y + threadIdx.y;
//Calculating Grid width in terms of number of threads
int grid_width = gridDim.x * blockDim.x;
int sum = 0;
for(int i = 0; i<h_rows; i++)
{
for(int j = 0; j<h_cols;j++)
{
if((index_y-i)>=0 & (index_y-i)<=(a_rows-1) & (index_x-j)>=0 & (index_x-j)<=(a_cols-1))
sum = sum + (a[(index_y-i)*a_cols+(index_x-j)] * h[i*h_cols + j]);
}
}
c[index_y*grid_width + index_x] = sum;
}
int main (int argc, char *argv[])
{
if(argc != 2)
{
fprintf(stderr,"Incorrect arguments passed.\nUse \"./2dconvol.o <input_file>\"\n");
exit(1);
}
FILE *f;
int a_cols = 1;
int h_cols = 1;
int a_rows = 0;
int h_rows = 0;
int c_cols = 0;
int c_rows = 0;
float *a_h = 0;
float *a_d = 0;
float *hinv_h = 0;
float *hinv_d = 0;
float *c_h = 0;
float *c_d = 0;
size_t a_size = 0;
size_t h_size = 0;
size_t c_size = 0;
dim3 block_size;
dim3 grid_size;
int i=0,j=0;
char junk,junk_old;
//Opening File
f = fopen(argv[1],"r");
//First pass to find out size of the matrices
junk = fgetc(f);
while (junk != EOF)
{
if(junk == '\n')
{
a_rows++;
}
else if(junk == 0x20 & a_rows == 0)
{
a_cols++;
}
junk_old = junk;
junk = fgetc(f);
if(junk == '\n' & junk == junk_old)
{
break;
}
}
junk = fgetc(f);
while (junk != EOF)
{
if(junk == '\n')
{
h_rows++;
}
else if(junk == 0x20 & h_rows == 0)
{
h_cols++;
}
junk = fgetc(f);
}
//Calculating op dimensions
c_rows = a_rows + h_rows - 1;
block_size.y = c_rows > 32 ? 32 : c_rows;
c_cols = a_cols + h_cols - 1;
block_size.x = c_cols > 16 ? 16 : c_cols;
grid_size.y = (c_rows/32)+1;
grid_size.x = (c_cols/16)+1;
#ifdef DEBUG
printf("Size of A: %dx%d\n",a_rows,a_cols);
printf("Size of H: %dx%d\n",h_rows,h_cols);
printf("Size of C: %dx%d\n",c_rows,c_cols);
printf("Size of grid: %dx%d\n",grid_size.y,grid_size.x);
printf("Size of block: %dx%d\n",block_size.y,block_size.x);
#endif
//Calculating the sizes of all the involved matrices
a_size = a_rows * a_cols *sizeof(float);
h_size = h_rows * h_cols *sizeof(float);
c_size = c_rows * c_cols *sizeof(float);
//Allocating memory on host
a_h = (float *) malloc(a_size);
hinv_h = (float *) malloc(h_size);
c_h = (float *) malloc(c_size);
//Rewinding file to read the actual data
rewind(f);
//Reading all the data matrices
for(i = 0;i<a_rows;i++)
{
for (j = 0; j<a_cols;j++)
fscanf(f,"%f",&a_h[i*a_cols + j]);
}
for(i = 0 ; i<h_rows;i++)
{
for (j = 0; j<h_cols ;j++)
{
fscanf(f,"%f",&hinv_h[i*h_cols + j]);
}
}
#ifdef DEBUG
for(i = 0;i<a_rows;i++)
{
for (j = 0; j<a_cols;j++)
printf("%f ",a_h[i*a_cols + j]);
printf("\n");
}
for(i = 0;i<h_rows;i++)
{
for (j = 0; j<h_cols;j++)
{
printf("%f ",hinv_h[i*h_cols + j]);
}
printf("\n");
}
printf("Completed Loading Matrices...\n");
#endif
//cudaMalloc to allocate required matrices on the device
hipMalloc((void **)&a_d,a_size);
hipMalloc((void **)&hinv_d,h_size);
hipMalloc((void **)&c_d,c_size);
//Copying input data from the Host to Device
hipMemcpy(a_d,a_h,a_size,hipMemcpyHostToDevice);
hipMemcpy(hinv_d,hinv_h,h_size,hipMemcpyHostToDevice);
//Setting Op matrix to all zeros
hipMemset(c_d,0,c_size);
//Convolution function
convol2D<<<grid_size,block_size>>>(a_d,hinv_d,c_d,a_rows,a_cols,h_rows,h_cols);
//Synchronize to wait for the kernel to complete exectution
hipDeviceSynchronize();
//Copy the output matrix from the Device to host
hipMemcpy(c_h,c_d,c_size,hipMemcpyDeviceToHost);
//Print Output
for(i=0;i<c_rows;i++)
{
for(j=0;j<c_cols;j++)
{
printf("%f ",c_h[i*c_cols + j]);
}
printf("\n");
}
//Freeing all the allocated memory from the device
hipFree(a_d);
hipFree(hinv_d);
hipFree(c_d);
//Freeing all the allocated memory from the host
free(a_h);
free(hinv_h);
free(c_h);
fclose(f);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z8convol2DPfS_S_iiii
.globl _Z8convol2DPfS_S_iiii
.p2align 8
.type _Z8convol2DPfS_S_iiii,@function
_Z8convol2DPfS_S_iiii:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x34
s_load_b32 s16, s[0:1], 0x20
s_load_b32 s12, s[0:1], 0x28
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s13, s2, 0xffff
s_lshr_b32 s2, s2, 16
s_delay_alu instid0(VALU_DEP_1)
v_mad_u64_u32 v[0:1], null, s14, s13, v[2:3]
v_mad_u64_u32 v[1:2], null, s15, s2, v[3:4]
s_cmp_lt_i32 s16, 1
s_cbranch_scc1 .LBB0_9
s_clause 0x2
s_load_b32 s14, s[0:1], 0x24
s_load_b64 s[8:9], s[0:1], 0x18
s_load_b128 s[4:7], s[0:1], 0x0
v_mov_b32_e32 v3, 0
s_mov_b32 s11, 0
s_mov_b32 s17, 0
s_mov_b32 s18, 0
s_waitcnt lgkmcnt(0)
s_cmp_gt_i32 s14, 0
v_mul_lo_u32 v2, s9, v1
s_cselect_b32 s15, -1, 0
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_3
.p2align 6
.LBB0_2:
s_delay_alu instid0(VALU_DEP_1)
v_subrev_nc_u32_e32 v2, s9, v2
s_add_i32 s18, s18, 1
s_add_i32 s17, s17, s14
s_cmp_eq_u32 s18, s16
s_cbranch_scc1 .LBB0_8
.LBB0_3:
s_and_not1_b32 vcc_lo, exec_lo, s15
s_cbranch_vccnz .LBB0_2
v_subrev_nc_u32_e32 v4, s18, v1
v_mov_b32_e32 v5, v0
s_mov_b32 s10, s17
s_mov_b32 s19, s14
s_delay_alu instid0(VALU_DEP_2)
v_cmp_gt_i32_e32 vcc_lo, s8, v4
s_branch .LBB0_6
.p2align 6
.LBB0_5:
s_or_b32 exec_lo, exec_lo, s3
v_add_nc_u32_e32 v5, -1, v5
s_add_i32 s19, s19, -1
s_add_i32 s10, s10, 1
s_cmp_eq_u32 s19, 0
s_cbranch_scc1 .LBB0_2
.LBB0_6:
v_or_b32_e32 v6, v5, v4
v_cmp_gt_i32_e64 s3, s9, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_lt_i32_e64 s2, -1, v6
s_and_b32 s2, vcc_lo, s2
s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
s_and_b32 s2, s3, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_5
v_add_nc_u32_e32 v6, v2, v5
s_lshl_b64 s[20:21], s[10:11], 2
v_cvt_f32_i32_e32 v3, v3
s_add_u32 s20, s6, s20
s_addc_u32 s21, s7, s21
v_ashrrev_i32_e32 v7, 31, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[6:7], 2, v[6:7]
v_add_co_u32 v6, s2, s4, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v7, s2, s5, v7, s2
s_load_b32 s2, s[20:21], 0x0
global_load_b32 v6, v[6:7], off
s_waitcnt vmcnt(0) lgkmcnt(0)
v_fmac_f32_e32 v3, s2, v6
v_cvt_i32_f32_e32 v3, v3
s_branch .LBB0_5
.LBB0_8:
s_set_inst_prefetch_distance 0x2
v_cvt_f32_i32_e32 v2, v3
s_branch .LBB0_10
.LBB0_9:
v_mov_b32_e32 v2, 0
.LBB0_10:
s_mul_i32 s12, s12, s13
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[3:4], null, s12, v1, v[0:1]
v_ashrrev_i32_e32 v4, 31, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[3:4]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v2, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z8convol2DPfS_S_iiii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 22
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z8convol2DPfS_S_iiii, .Lfunc_end0-_Z8convol2DPfS_S_iiii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 36
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z8convol2DPfS_S_iiii
.private_segment_fixed_size: 0
.sgpr_count: 24
.sgpr_spill_count: 0
.symbol: _Z8convol2DPfS_S_iiii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define DEBUG
__global__ void convol2D (float *a, float *h, float *c, int a_rows, int a_cols, int h_rows, int h_cols)
{
//Calculating indices along x and y directions
int index_x = blockIdx.x * blockDim.x + threadIdx.x;
int index_y = blockIdx.y * blockDim.y + threadIdx.y;
//Calculating Grid width in terms of number of threads
int grid_width = gridDim.x * blockDim.x;
int sum = 0;
for(int i = 0; i<h_rows; i++)
{
for(int j = 0; j<h_cols;j++)
{
if((index_y-i)>=0 & (index_y-i)<=(a_rows-1) & (index_x-j)>=0 & (index_x-j)<=(a_cols-1))
sum = sum + (a[(index_y-i)*a_cols+(index_x-j)] * h[i*h_cols + j]);
}
}
c[index_y*grid_width + index_x] = sum;
}
int main (int argc, char *argv[])
{
if(argc != 2)
{
fprintf(stderr,"Incorrect arguments passed.\nUse \"./2dconvol.o <input_file>\"\n");
exit(1);
}
FILE *f;
int a_cols = 1;
int h_cols = 1;
int a_rows = 0;
int h_rows = 0;
int c_cols = 0;
int c_rows = 0;
float *a_h = 0;
float *a_d = 0;
float *hinv_h = 0;
float *hinv_d = 0;
float *c_h = 0;
float *c_d = 0;
size_t a_size = 0;
size_t h_size = 0;
size_t c_size = 0;
dim3 block_size;
dim3 grid_size;
int i=0,j=0;
char junk,junk_old;
//Opening File
f = fopen(argv[1],"r");
//First pass to find out size of the matrices
junk = fgetc(f);
while (junk != EOF)
{
if(junk == '\n')
{
a_rows++;
}
else if(junk == 0x20 & a_rows == 0)
{
a_cols++;
}
junk_old = junk;
junk = fgetc(f);
if(junk == '\n' & junk == junk_old)
{
break;
}
}
junk = fgetc(f);
while (junk != EOF)
{
if(junk == '\n')
{
h_rows++;
}
else if(junk == 0x20 & h_rows == 0)
{
h_cols++;
}
junk = fgetc(f);
}
//Calculating op dimensions
c_rows = a_rows + h_rows - 1;
block_size.y = c_rows > 32 ? 32 : c_rows;
c_cols = a_cols + h_cols - 1;
block_size.x = c_cols > 16 ? 16 : c_cols;
grid_size.y = (c_rows/32)+1;
grid_size.x = (c_cols/16)+1;
#ifdef DEBUG
printf("Size of A: %dx%d\n",a_rows,a_cols);
printf("Size of H: %dx%d\n",h_rows,h_cols);
printf("Size of C: %dx%d\n",c_rows,c_cols);
printf("Size of grid: %dx%d\n",grid_size.y,grid_size.x);
printf("Size of block: %dx%d\n",block_size.y,block_size.x);
#endif
//Calculating the sizes of all the involved matrices
a_size = a_rows * a_cols *sizeof(float);
h_size = h_rows * h_cols *sizeof(float);
c_size = c_rows * c_cols *sizeof(float);
//Allocating memory on host
a_h = (float *) malloc(a_size);
hinv_h = (float *) malloc(h_size);
c_h = (float *) malloc(c_size);
//Rewinding file to read the actual data
rewind(f);
//Reading all the data matrices
for(i = 0;i<a_rows;i++)
{
for (j = 0; j<a_cols;j++)
fscanf(f,"%f",&a_h[i*a_cols + j]);
}
for(i = 0 ; i<h_rows;i++)
{
for (j = 0; j<h_cols ;j++)
{
fscanf(f,"%f",&hinv_h[i*h_cols + j]);
}
}
#ifdef DEBUG
for(i = 0;i<a_rows;i++)
{
for (j = 0; j<a_cols;j++)
printf("%f ",a_h[i*a_cols + j]);
printf("\n");
}
for(i = 0;i<h_rows;i++)
{
for (j = 0; j<h_cols;j++)
{
printf("%f ",hinv_h[i*h_cols + j]);
}
printf("\n");
}
printf("Completed Loading Matrices...\n");
#endif
//cudaMalloc to allocate required matrices on the device
hipMalloc((void **)&a_d,a_size);
hipMalloc((void **)&hinv_d,h_size);
hipMalloc((void **)&c_d,c_size);
//Copying input data from the Host to Device
hipMemcpy(a_d,a_h,a_size,hipMemcpyHostToDevice);
hipMemcpy(hinv_d,hinv_h,h_size,hipMemcpyHostToDevice);
//Setting Op matrix to all zeros
hipMemset(c_d,0,c_size);
//Convolution function
convol2D<<<grid_size,block_size>>>(a_d,hinv_d,c_d,a_rows,a_cols,h_rows,h_cols);
//Synchronize to wait for the kernel to complete exectution
hipDeviceSynchronize();
//Copy the output matrix from the Device to host
hipMemcpy(c_h,c_d,c_size,hipMemcpyDeviceToHost);
//Print Output
for(i=0;i<c_rows;i++)
{
for(j=0;j<c_cols;j++)
{
printf("%f ",c_h[i*c_cols + j]);
}
printf("\n");
}
//Freeing all the allocated memory from the device
hipFree(a_d);
hipFree(hinv_d);
hipFree(c_d);
//Freeing all the allocated memory from the host
free(a_h);
free(hinv_h);
free(c_h);
fclose(f);
return 0;
} | .text
.file "2dconvol.hip"
.globl _Z23__device_stub__convol2DPfS_S_iiii # -- Begin function _Z23__device_stub__convol2DPfS_S_iiii
.p2align 4, 0x90
.type _Z23__device_stub__convol2DPfS_S_iiii,@function
_Z23__device_stub__convol2DPfS_S_iiii: # @_Z23__device_stub__convol2DPfS_S_iiii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 20(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 160(%rsp), %rax
movq %rax, 144(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z8convol2DPfS_S_iiii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z23__device_stub__convol2DPfS_S_iiii, .Lfunc_end0-_Z23__device_stub__convol2DPfS_S_iiii
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $328, %rsp # imm = 0x148
.cfi_def_cfa_offset 384
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
cmpl $2, %edi
jne .LBB1_46
# %bb.1:
movq $0, 72(%rsp)
movq $0, 64(%rsp)
movq $0, 24(%rsp)
movq 8(%rsi), %rdi
movl $.L.str.1, %esi
callq fopen
movq %rax, %rbx
movq %rax, %rdi
callq fgetc
movl %eax, %ebp
xorl %r15d, %r15d
movl $0, %r13d
movl $1, %eax
movq %rax, 8(%rsp) # 8-byte Spill
cmpb $-1, %bpl
je .LBB1_8
# %bb.2: # %.preheader179.preheader
movl $1, %eax
movq %rax, 8(%rsp) # 8-byte Spill
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB1_3: # %.preheader179
# =>This Inner Loop Header: Depth=1
shll $24, %ebp
cmpl $167772160, %ebp # imm = 0xA000000
jne .LBB1_5
# %bb.4: # in Loop: Header=BB1_3 Depth=1
incl %r13d
jmp .LBB1_6
.p2align 4, 0x90
.LBB1_5: # in Loop: Header=BB1_3 Depth=1
movl %ebp, %eax
xorl $536870912, %eax # imm = 0x20000000
xorl %ecx, %ecx
orl %r13d, %eax
sete %cl
movq 8(%rsp), %rax # 8-byte Reload
addl %ecx, %eax
movq %rax, 8(%rsp) # 8-byte Spill
.LBB1_6: # in Loop: Header=BB1_3 Depth=1
movq %rbx, %rdi
callq fgetc
movl %eax, %edx
shll $24, %edx
movl %edx, %ecx
xorl $167772160, %ecx # imm = 0xA000000
xorl $167772160, %ebp # imm = 0xA000000
orl %ecx, %ebp
setne %cl
cmpl $-16777216, %edx # imm = 0xFF000000
je .LBB1_8
# %bb.7: # in Loop: Header=BB1_3 Depth=1
movl %eax, %ebp
testb %cl, %cl
jne .LBB1_3
.LBB1_8: # %.loopexit
movl $1, %r14d
jmp .LBB1_9
.p2align 4, 0x90
.LBB1_12: # in Loop: Header=BB1_9 Depth=1
xorl $536870912, %eax # imm = 0x20000000
xorl %ecx, %ecx
orl %r15d, %eax
sete %cl
addl %ecx, %r14d
.LBB1_9: # =>This Inner Loop Header: Depth=1
movq %rbx, %rdi
callq fgetc
shll $24, %eax
cmpl $-16777216, %eax # imm = 0xFF000000
je .LBB1_13
# %bb.10: # in Loop: Header=BB1_9 Depth=1
cmpl $167772160, %eax # imm = 0xA000000
jne .LBB1_12
# %bb.11: # in Loop: Header=BB1_9 Depth=1
incl %r15d
jmp .LBB1_9
.LBB1_13:
leal (%r15,%r13), %ecx
decl %ecx
movl %ecx, 20(%rsp) # 4-byte Spill
cmpl $32, %ecx
movl $32, %eax
cmovll %ecx, %eax
movq %rax, 40(%rsp) # 8-byte Spill
movq 8(%rsp), %r12 # 8-byte Reload
leal (%r12,%r14), %esi
decl %esi
movl %esi, 16(%rsp) # 4-byte Spill
movq %r12, %rax
cmpl $16, %esi
movl $16, %edx
cmovll %esi, %edx
movq %rdx, 128(%rsp) # 8-byte Spill
leal (%r15,%r13), %r12d
addl $30, %r12d
testl %ecx, %ecx
cmovnsl %ecx, %r12d
sarl $5, %r12d
incl %r12d
leal (%rax,%r14), %ebp
addl $14, %ebp
movq %rax, %rdx
movq %rax, 8(%rsp) # 8-byte Spill
testl %esi, %esi
cmovnsl %esi, %ebp
sarl $4, %ebp
incl %ebp
movl $.L.str.2, %edi
movl %r13d, %esi
# kill: def $edx killed $edx killed $rdx
xorl %eax, %eax
callq printf
movl $.L.str.3, %edi
movl %r15d, %esi
movl %r14d, %edx
xorl %eax, %eax
callq printf
movl $.L.str.4, %edi
movl 20(%rsp), %esi # 4-byte Reload
movl 16(%rsp), %edx # 4-byte Reload
xorl %eax, %eax
callq printf
movl $.L.str.5, %edi
movq %r12, 88(%rsp) # 8-byte Spill
movl %r12d, %esi
movq %rbp, 168(%rsp) # 8-byte Spill
movl %ebp, %edx
xorl %eax, %eax
callq printf
movl $.L.str.6, %edi
movq 40(%rsp), %rsi # 8-byte Reload
# kill: def $esi killed $esi killed $rsi
movq 128(%rsp), %rdx # 8-byte Reload
# kill: def $edx killed $edx killed $rdx
xorl %eax, %eax
callq printf
movq 8(%rsp), %r12 # 8-byte Reload
movl %r12d, %eax
imull %r13d, %eax
movslq %eax, %rdi
shlq $2, %rdi
movq %r14, 104(%rsp) # 8-byte Spill
movl %r14d, %eax
movq %r15, 80(%rsp) # 8-byte Spill
imull %r15d, %eax
movslq %eax, %r15
shlq $2, %r15
movl 16(%rsp), %eax # 4-byte Reload
imull 20(%rsp), %eax # 4-byte Folded Reload
movslq %eax, %r14
shlq $2, %r14
movq %rdi, 176(%rsp) # 8-byte Spill
callq malloc
movq %rax, 56(%rsp) # 8-byte Spill
movq %r15, 184(%rsp) # 8-byte Spill
movq %r15, %rdi
callq malloc
movq %rax, 48(%rsp) # 8-byte Spill
movq %r14, 192(%rsp) # 8-byte Spill
movq %r14, %rdi
callq malloc
movq %rax, 96(%rsp) # 8-byte Spill
movq %rbx, %rdi
callq rewind
movl %r13d, %eax
movq %rax, 152(%rsp) # 8-byte Spill
movl %r12d, %r15d
movq %r13, 136(%rsp) # 8-byte Spill
testl %r13d, %r13d
jle .LBB1_19
# %bb.14: # %.preheader178.lr.ph
xorl %ebp, %ebp
xorl %r14d, %r14d
jmp .LBB1_15
.p2align 4, 0x90
.LBB1_18: # %._crit_edge
# in Loop: Header=BB1_15 Depth=1
incq %r14
movq 8(%rsp), %r12 # 8-byte Reload
addl %r12d, %ebp
cmpq 152(%rsp), %r14 # 8-byte Folded Reload
je .LBB1_19
.LBB1_15: # %.preheader178
# =>This Loop Header: Depth=1
# Child Loop BB1_17 Depth 2
testl %r12d, %r12d
jle .LBB1_18
# %bb.16: # in Loop: Header=BB1_15 Depth=1
movl %ebp, %eax
movq 56(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %r13
movq %r15, %r12
.p2align 4, 0x90
.LBB1_17: # Parent Loop BB1_15 Depth=1
# => This Inner Loop Header: Depth=2
movl $.L.str.7, %esi
movq %rbx, %rdi
movq %r13, %rdx
xorl %eax, %eax
callq __isoc23_fscanf
addq $4, %r13
decq %r12
jne .LBB1_17
jmp .LBB1_18
.LBB1_19: # %.preheader177
movq %rbx, 144(%rsp) # 8-byte Spill
movq 80(%rsp), %rcx # 8-byte Reload
movl %ecx, %eax
movq %rax, 160(%rsp) # 8-byte Spill
movq 104(%rsp), %rax # 8-byte Reload
movl %eax, %ebx
testl %ecx, %ecx
je .LBB1_25
# %bb.20: # %.preheader176.lr.ph
movl $0, 36(%rsp) # 4-byte Folded Spill
xorl %r14d, %r14d
movq 144(%rsp), %r12 # 8-byte Reload
jmp .LBB1_21
.p2align 4, 0x90
.LBB1_24: # %._crit_edge184
# in Loop: Header=BB1_21 Depth=1
incq %r14
movq 104(%rsp), %rax # 8-byte Reload
addl %eax, 36(%rsp) # 4-byte Folded Spill
cmpq 160(%rsp), %r14 # 8-byte Folded Reload
je .LBB1_25
.LBB1_21: # %.preheader176
# =>This Loop Header: Depth=1
# Child Loop BB1_23 Depth 2
testl %eax, %eax
jle .LBB1_24
# %bb.22: # in Loop: Header=BB1_21 Depth=1
movl 36(%rsp), %eax # 4-byte Reload
movq 48(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %r13
movq %rbx, %rbp
.p2align 4, 0x90
.LBB1_23: # Parent Loop BB1_21 Depth=1
# => This Inner Loop Header: Depth=2
movl $.L.str.7, %esi
movq %r12, %rdi
movq %r13, %rdx
xorl %eax, %eax
callq __isoc23_fscanf
addq $4, %r13
decq %rbp
jne .LBB1_23
jmp .LBB1_24
.LBB1_25: # %.preheader175
shlq $32, 40(%rsp) # 8-byte Folded Spill
shlq $32, 88(%rsp) # 8-byte Folded Spill
cmpl $0, 136(%rsp) # 4-byte Folded Reload
movq 8(%rsp), %rax # 8-byte Reload
jle .LBB1_31
# %bb.26: # %.preheader174.lr.ph
xorl %r13d, %r13d
xorl %r12d, %r12d
jmp .LBB1_27
.p2align 4, 0x90
.LBB1_30: # %._crit_edge188
# in Loop: Header=BB1_27 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r12
movq 8(%rsp), %rax # 8-byte Reload
addl %eax, %r13d
cmpq 152(%rsp), %r12 # 8-byte Folded Reload
je .LBB1_31
.LBB1_27: # %.preheader174
# =>This Loop Header: Depth=1
# Child Loop BB1_29 Depth 2
testl %eax, %eax
jle .LBB1_30
# %bb.28: # %.lr.ph187
# in Loop: Header=BB1_27 Depth=1
movl %r13d, %eax
movq 56(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %r14
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB1_29: # Parent Loop BB1_27 Depth=1
# => This Inner Loop Header: Depth=2
movss (%r14,%rbp,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.8, %edi
movb $1, %al
callq printf
incq %rbp
cmpq %rbp, %r15
jne .LBB1_29
jmp .LBB1_30
.LBB1_31: # %.preheader173
movq 128(%rsp), %rax # 8-byte Reload
addq %rax, 40(%rsp) # 8-byte Folded Spill
movq 168(%rsp), %rax # 8-byte Reload
addq %rax, 88(%rsp) # 8-byte Folded Spill
cmpl $0, 80(%rsp) # 4-byte Folded Reload
movq 104(%rsp), %rbp # 8-byte Reload
je .LBB1_37
# %bb.32: # %.preheader172.lr.ph
xorl %r15d, %r15d
xorl %r14d, %r14d
jmp .LBB1_33
.p2align 4, 0x90
.LBB1_36: # %._crit_edge192
# in Loop: Header=BB1_33 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r14
addl %ebp, %r15d
cmpq 160(%rsp), %r14 # 8-byte Folded Reload
je .LBB1_37
.LBB1_33: # %.preheader172
# =>This Loop Header: Depth=1
# Child Loop BB1_35 Depth 2
testl %ebp, %ebp
jle .LBB1_36
# %bb.34: # %.lr.ph191
# in Loop: Header=BB1_33 Depth=1
movl %r15d, %eax
movq 48(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %r12
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB1_35: # Parent Loop BB1_33 Depth=1
# => This Inner Loop Header: Depth=2
movss (%r12,%r13,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.8, %edi
movb $1, %al
callq printf
incq %r13
cmpq %r13, %rbx
jne .LBB1_35
jmp .LBB1_36
.LBB1_37: # %._crit_edge194
movl $.Lstr, %edi
callq puts@PLT
leaq 72(%rsp), %rdi
movq 176(%rsp), %r15 # 8-byte Reload
movq %r15, %rsi
callq hipMalloc
leaq 64(%rsp), %rdi
movq 184(%rsp), %r14 # 8-byte Reload
movq %r14, %rsi
callq hipMalloc
leaq 24(%rsp), %rdi
movq 192(%rsp), %rbx # 8-byte Reload
movq %rbx, %rsi
callq hipMalloc
movq 72(%rsp), %rdi
movq 56(%rsp), %rsi # 8-byte Reload
movq %r15, %rdx
movl $1, %ecx
callq hipMemcpy
movq 64(%rsp), %rdi
movq 48(%rsp), %rsi # 8-byte Reload
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
xorl %esi, %esi
movq %rbx, %rdx
callq hipMemset
movq 88(%rsp), %rdi # 8-byte Reload
movl $1, %esi
movq 40(%rsp), %rdx # 8-byte Reload
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
movq 136(%rsp), %rsi # 8-byte Reload
jne .LBB1_39
# %bb.38:
movq 72(%rsp), %rax
movq 64(%rsp), %rcx
movq 24(%rsp), %rdx
movq %rax, 264(%rsp)
movq %rcx, 256(%rsp)
movq %rdx, 248(%rsp)
movl %esi, 124(%rsp)
movq 8(%rsp), %rax # 8-byte Reload
movl %eax, 120(%rsp)
movq 80(%rsp), %rax # 8-byte Reload
movl %eax, 116(%rsp)
movl %ebp, 112(%rsp)
leaq 264(%rsp), %rax
movq %rax, 272(%rsp)
leaq 256(%rsp), %rax
movq %rax, 280(%rsp)
leaq 248(%rsp), %rax
movq %rax, 288(%rsp)
leaq 124(%rsp), %rax
movq %rax, 296(%rsp)
leaq 120(%rsp), %rax
movq %rax, 304(%rsp)
leaq 116(%rsp), %rax
movq %rax, 312(%rsp)
leaq 112(%rsp), %rax
movq %rax, 320(%rsp)
leaq 232(%rsp), %rdi
leaq 216(%rsp), %rsi
leaq 208(%rsp), %rdx
leaq 200(%rsp), %rcx
callq __hipPopCallConfiguration
movq 232(%rsp), %rsi
movl 240(%rsp), %edx
movq 216(%rsp), %rcx
movl 224(%rsp), %r8d
leaq 272(%rsp), %r9
movl $_Z8convol2DPfS_S_iiii, %edi
pushq 200(%rsp)
.cfi_adjust_cfa_offset 8
pushq 216(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_39:
callq hipDeviceSynchronize
movq 24(%rsp), %rsi
movq 96(%rsp), %rdi # 8-byte Reload
movq %rbx, %rdx
movl $2, %ecx
callq hipMemcpy
movl 20(%rsp), %ecx # 4-byte Reload
testl %ecx, %ecx
movl 16(%rsp), %eax # 4-byte Reload
jle .LBB1_45
# %bb.40: # %.preheader.lr.ph
movl %ecx, %r14d
movl %eax, %r15d
xorl %r12d, %r12d
xorl %r13d, %r13d
jmp .LBB1_41
.p2align 4, 0x90
.LBB1_44: # %._crit_edge197
# in Loop: Header=BB1_41 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r13
movl 16(%rsp), %eax # 4-byte Reload
addl %eax, %r12d
cmpq %r14, %r13
je .LBB1_45
.LBB1_41: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB1_43 Depth 2
testl %eax, %eax
jle .LBB1_44
# %bb.42: # %.lr.ph196
# in Loop: Header=BB1_41 Depth=1
movl %r12d, %eax
movq 96(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %rbx
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB1_43: # Parent Loop BB1_41 Depth=1
# => This Inner Loop Header: Depth=2
movss (%rbx,%rbp,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.8, %edi
movb $1, %al
callq printf
incq %rbp
cmpq %rbp, %r15
jne .LBB1_43
jmp .LBB1_44
.LBB1_45: # %._crit_edge199
movq 72(%rsp), %rdi
callq hipFree
movq 64(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 56(%rsp), %rdi # 8-byte Reload
callq free
movq 48(%rsp), %rdi # 8-byte Reload
callq free
movq 96(%rsp), %rdi # 8-byte Reload
callq free
movq 144(%rsp), %rdi # 8-byte Reload
callq fclose
xorl %eax, %eax
addq $328, %rsp # imm = 0x148
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB1_46:
.cfi_def_cfa_offset 384
movq stderr(%rip), %rcx
movl $.L.str, %edi
movl $60, %esi
movl $1, %edx
callq fwrite@PLT
movl $1, %edi
callq exit
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8convol2DPfS_S_iiii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z8convol2DPfS_S_iiii,@object # @_Z8convol2DPfS_S_iiii
.section .rodata,"a",@progbits
.globl _Z8convol2DPfS_S_iiii
.p2align 3, 0x0
_Z8convol2DPfS_S_iiii:
.quad _Z23__device_stub__convol2DPfS_S_iiii
.size _Z8convol2DPfS_S_iiii, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Incorrect arguments passed.\nUse \"./2dconvol.o <input_file>\"\n"
.size .L.str, 61
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "r"
.size .L.str.1, 2
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Size of A: %dx%d\n"
.size .L.str.2, 18
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Size of H: %dx%d\n"
.size .L.str.3, 18
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Size of C: %dx%d\n"
.size .L.str.4, 18
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "Size of grid: %dx%d\n"
.size .L.str.5, 21
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "Size of block: %dx%d\n"
.size .L.str.6, 22
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "%f"
.size .L.str.7, 3
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "%f "
.size .L.str.8, 4
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z8convol2DPfS_S_iiii"
.size .L__unnamed_1, 22
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Completed Loading Matrices..."
.size .Lstr, 30
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z23__device_stub__convol2DPfS_S_iiii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z8convol2DPfS_S_iiii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z8convol2DPfS_S_iiii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R4, RZ, RZ, 0x1 ; /* 0x00000001ff047424 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe20000000a00 */
/*0040*/ IMAD.MOV.U32 R6, RZ, RZ, RZ ; /* 0x000000ffff067224 */
/* 0x000fe200078e00ff */
/*0050*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0060*/ ISETP.LE.AND P0, PT, R4.reuse, c[0x0][0x184], PT ; /* 0x0000610004007a0c */
/* 0x040fe40003f03270 */
/*0070*/ S2R R7, SR_CTAID.Y ; /* 0x0000000000077919 */
/* 0x000e640000002600 */
/*0080*/ ISETP.GT.OR P0, PT, R4, c[0x0][0x180], !P0 ; /* 0x0000600004007a0c */
/* 0x000fc40004704670 */
/*0090*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e620000002200 */
/*00a0*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fe400078e0203 */
/*00b0*/ IMAD R7, R7, c[0x0][0x4], R2 ; /* 0x0000010007077a24 */
/* 0x002fd000078e0202 */
/*00c0*/ @P0 BRA 0x780 ; /* 0x000006b000000947 */
/* 0x000fea0003800000 */
/*00d0*/ IMAD.MOV.U32 R8, RZ, RZ, c[0x0][0x184] ; /* 0x00006100ff087624 */
/* 0x000fe200078e00ff */
/*00e0*/ HFMA2.MMA R6, -RZ, RZ, 0, 0 ; /* 0x00000000ff067435 */
/* 0x000fe200000001ff */
/*00f0*/ IMAD.MOV.U32 R9, RZ, RZ, 0x1 ; /* 0x00000001ff097424 */
/* 0x000fe400078e00ff */
/*0100*/ IMAD.MOV.U32 R10, RZ, RZ, RZ ; /* 0x000000ffff0a7224 */
/* 0x000fe200078e00ff */
/*0110*/ LOP3.LUT R8, R8, 0x3, RZ, 0xc0, !PT ; /* 0x0000000308087812 */
/* 0x000fe400078ec0ff */
/*0120*/ IADD3 R9, -R9, c[0x0][0x184], RZ ; /* 0x0000610009097a10 */
/* 0x000fe40007ffe1ff */
/*0130*/ IADD3 R11, -R8, c[0x0][0x184], RZ ; /* 0x00006100080b7a10 */
/* 0x000fe40007ffe1ff */
/*0140*/ ISETP.GE.U32.AND P1, PT, R9, 0x3, PT ; /* 0x000000030900780c */
/* 0x000fe20003f26070 */
/*0150*/ IMAD.IADD R12, R7, 0x1, -R10.reuse ; /* 0x00000001070c7824 */
/* 0x102fe200078e0a0a */
/*0160*/ ISETP.NE.AND P4, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fe20003f85270 */
/*0170*/ IMAD.MOV.U32 R14, RZ, RZ, R10 ; /* 0x000000ffff0e7224 */
/* 0x000fc400078e000a */
/*0180*/ IMAD.MOV.U32 R13, RZ, RZ, RZ ; /* 0x000000ffff0d7224 */
/* 0x000fe200078e00ff */
/*0190*/ ISETP.GE.AND P0, PT, R12, c[0x0][0x178], PT ; /* 0x00005e000c007a0c */
/* 0x000fc80003f06270 */
/*01a0*/ ISETP.GE.AND P0, PT, R7, R10, !P0 ; /* 0x0000000a0700720c */
/* 0x000fe40004706270 */
/*01b0*/ IADD3 R10, R10, 0x1, RZ ; /* 0x000000010a0a7810 */
/* 0x000fc80007ffe0ff */
/*01c0*/ ISETP.GE.AND P5, PT, R10, c[0x0][0x180], PT ; /* 0x000060000a007a0c */
/* 0x000fe20003fa6270 */
/*01d0*/ @!P1 BRA 0x4b0 ; /* 0x000002d000009947 */
/* 0x005fee0003800000 */
/*01e0*/ IMAD.MOV.U32 R13, RZ, RZ, RZ ; /* 0x000000ffff0d7224 */
/* 0x000fe200078e00ff */
/*01f0*/ MOV R15, R11 ; /* 0x0000000b000f7202 */
/* 0x000fc80000000f00 */
/*0200*/ IMAD.IADD R3, R0.reuse, 0x1, -R13.reuse ; /* 0x0000000100037824 */
/* 0x140fe200078e0a0d */
/*0210*/ ISETP.LT.OR P1, PT, R0, R13, !P0 ; /* 0x0000000d0000720c */
/* 0x000fe20004721670 */
/*0220*/ IMAD.MOV.U32 R2, RZ, RZ, 0x4 ; /* 0x00000004ff027424 */
/* 0x000fe400078e00ff */
/*0230*/ IMAD R4, R14, c[0x0][0x184], R13 ; /* 0x000061000e047a24 */
/* 0x000fe200078e020d */
/*0240*/ ISETP.GE.OR P1, PT, R3, c[0x0][0x17c], P1 ; /* 0x00005f0003007a0c */
/* 0x000fe20000f26670 */
/*0250*/ IMAD R3, R12, c[0x0][0x17c], R3 ; /* 0x00005f000c037a24 */
/* 0x000fe400078e0203 */
/*0260*/ IMAD.WIDE R4, R4, R2, c[0x0][0x168] ; /* 0x00005a0004047625 */
/* 0x000fc800078e0202 */
/*0270*/ IMAD.WIDE R2, R3, R2, c[0x0][0x160] ; /* 0x0000580003027625 */
/* 0x000fcc00078e0202 */
/*0280*/ @!P1 LDG.E R21, [R4.64] ; /* 0x0000000604159981 */
/* 0x000ea8000c1e1900 */
/*0290*/ @!P1 LDG.E R22, [R2.64] ; /* 0x0000000602169981 */
/* 0x000ea2000c1e1900 */
/*02a0*/ ISETP.LE.OR P2, PT, R0.reuse, R13, !P0 ; /* 0x0000000d0000720c */
/* 0x040fe40004743670 */
/*02b0*/ IADD3 R16, R0, -0x1, -R13 ; /* 0xffffffff00107810 */
/* 0x000fc80007ffe80d */
/*02c0*/ ISETP.GE.OR P2, PT, R16, c[0x0][0x17c], P2 ; /* 0x00005f0010007a0c */
/* 0x000fda0001746670 */
/*02d0*/ @!P2 LDG.E R16, [R4.64+0x4] ; /* 0x000004060410a981 */
/* 0x000ee8000c1e1900 */
/*02e0*/ @!P2 LDG.E R17, [R2.64+-0x4] ; /* 0xfffffc060211a981 */
/* 0x000ee2000c1e1900 */
/*02f0*/ IADD3 R19, R13, 0x2, RZ ; /* 0x000000020d137810 */
/* 0x000fc80007ffe0ff */
/*0300*/ ISETP.LT.OR P3, PT, R0.reuse, R19, !P0 ; /* 0x000000130000720c */
/* 0x040fe20004761670 */
/*0310*/ IMAD.IADD R19, R0, 0x1, -R19 ; /* 0x0000000100137824 */
/* 0x000fca00078e0a13 */
/*0320*/ ISETP.GE.OR P3, PT, R19, c[0x0][0x17c], P3 ; /* 0x00005f0013007a0c */
/* 0x000fda0001f66670 */
/*0330*/ @!P3 LDG.E R18, [R4.64+0x8] ; /* 0x000008060412b981 */
/* 0x000f28000c1e1900 */
/*0340*/ @!P3 LDG.E R19, [R2.64+-0x8] ; /* 0xfffff8060213b981 */
/* 0x000f22000c1e1900 */
/*0350*/ IADD3 R23, R13, 0x3, RZ ; /* 0x000000030d177810 */
/* 0x001fc80007ffe0ff */
/*0360*/ ISETP.LT.OR P6, PT, R0.reuse, R23, !P0 ; /* 0x000000170000720c */
/* 0x040fe200047c1670 */
/*0370*/ IMAD.IADD R23, R0, 0x1, -R23 ; /* 0x0000000100177824 */
/* 0x000fca00078e0a17 */
/*0380*/ ISETP.GE.OR P6, PT, R23, c[0x0][0x17c], P6 ; /* 0x00005f0017007a0c */
/* 0x000fda00037c6670 */
/*0390*/ @!P6 LDG.E R23, [R4.64+0xc] ; /* 0x00000c060417e981 */
/* 0x000168000c1e1900 */
/*03a0*/ @!P6 LDG.E R24, [R2.64+-0xc] ; /* 0xfffff4060218e981 */
/* 0x000f62000c1e1900 */
/*03b0*/ @!P1 I2F R20, R6 ; /* 0x0000000600149306 */
/* 0x002ea20000201400 */
/*03c0*/ IADD3 R15, R15, -0x4, RZ ; /* 0xfffffffc0f0f7810 */
/* 0x000fe40007ffe0ff */
/*03d0*/ IADD3 R13, R13, 0x4, RZ ; /* 0x000000040d0d7810 */
/* 0x000fe20007ffe0ff */
/*03e0*/ @!P1 FFMA R20, R21, R22, R20 ; /* 0x0000001615149223 */
/* 0x004fc80000000014 */
/*03f0*/ @!P1 F2I.TRUNC.NTZ R6, R20 ; /* 0x0000001400069305 */
/* 0x000e62000020f100 */
/*0400*/ ISETP.NE.AND P1, PT, R15, RZ, PT ; /* 0x000000ff0f00720c */
/* 0x000fce0003f25270 */
/*0410*/ @!P2 I2F R21, R6 ; /* 0x000000060015a306 */
/* 0x002ee40000201400 */
/*0420*/ @!P2 FFMA R16, R16, R17, R21 ; /* 0x000000111010a223 */
/* 0x008fcc0000000015 */
/*0430*/ @!P2 F2I.TRUNC.NTZ R6, R16 ; /* 0x000000100006a305 */
/* 0x000e70000020f100 */
/*0440*/ @!P3 I2F R17, R6 ; /* 0x000000060011b306 */
/* 0x002f240000201400 */
/*0450*/ @!P3 FFMA R18, R18, R19, R17 ; /* 0x000000131212b223 */
/* 0x010fcc0000000011 */
/*0460*/ @!P3 F2I.TRUNC.NTZ R6, R18 ; /* 0x000000120006b305 */
/* 0x000e30000020f100 */
/*0470*/ @!P6 I2F R4, R6 ; /* 0x000000060004e306 */
/* 0x001f640000201400 */
/*0480*/ @!P6 FFMA R23, R23, R24, R4 ; /* 0x000000181717e223 */
/* 0x020fcc0000000004 */
/*0490*/ @!P6 F2I.TRUNC.NTZ R6, R23 ; /* 0x000000170006e305 */
/* 0x000062000020f100 */
/*04a0*/ @P1 BRA 0x200 ; /* 0xfffffd5000001947 */
/* 0x000fea000383ffff */
/*04b0*/ @!P4 BRA 0x770 ; /* 0x000002b00000c947 */
/* 0x000fea0003800000 */
/*04c0*/ ISETP.LT.OR P1, PT, R0.reuse, R13.reuse, !P0 ; /* 0x0000000d0000720c */
/* 0x0c0fe20004721670 */
/*04d0*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */
/* 0x000fe200078e00ff */
/*04e0*/ IADD3 R3, R0, -R13, RZ ; /* 0x8000000d00037210 */
/* 0x000fe20007ffe0ff */
/*04f0*/ IMAD R2, R14, c[0x0][0x184], R13 ; /* 0x000061000e027a24 */
/* 0x000fe200078e020d */
/*0500*/ BSSY B0, 0x5d0 ; /* 0x000000c000007945 */
/* 0x000fe20003800000 */
/*0510*/ ISETP.NE.AND P2, PT, R8, 0x1, PT ; /* 0x000000010800780c */
/* 0x000fe40003f45270 */
/*0520*/ ISETP.GE.OR P1, PT, R3, c[0x0][0x17c], P1 ; /* 0x00005f0003007a0c */
/* 0x000fe20000f26670 */
/*0530*/ IMAD R4, R12, c[0x0][0x17c], R3 ; /* 0x00005f000c047a24 */
/* 0x000fe400078e0203 */
/*0540*/ IMAD.WIDE R2, R2, R5, c[0x0][0x168] ; /* 0x00005a0002027625 */
/* 0x000fc800078e0205 */
/*0550*/ IMAD.WIDE R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fcc00078e0205 */
/*0560*/ @P1 BRA 0x5c0 ; /* 0x0000005000001947 */
/* 0x000fea0003800000 */
/*0570*/ LDG.E R12, [R2.64] ; /* 0x00000006020c7981 */
/* 0x000ea8000c1e1900 */
/*0580*/ LDG.E R15, [R4.64] ; /* 0x00000006040f7981 */
/* 0x000ea2000c1e1900 */
/*0590*/ I2F R6, R6 ; /* 0x0000000600067306 */
/* 0x002ea40000201400 */
/*05a0*/ FFMA R12, R12, R15, R6 ; /* 0x0000000f0c0c7223 */
/* 0x004fcc0000000006 */
/*05b0*/ F2I.TRUNC.NTZ R6, R12 ; /* 0x0000000c00067305 */
/* 0x0002a4000020f100 */
/*05c0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*05d0*/ @!P2 BRA 0x770 ; /* 0x000001900000a947 */
/* 0x000fea0003800000 */
/*05e0*/ ISETP.LE.OR P1, PT, R0.reuse, R13, !P0 ; /* 0x0000000d0000720c */
/* 0x040fe20004723670 */
/*05f0*/ BSSY B0, 0x6a0 ; /* 0x000000a000007945 */
/* 0x000fe20003800000 */
/*0600*/ IADD3 R12, R0, -0x1, -R13 ; /* 0xffffffff000c7810 */
/* 0x002fe40007ffe80d */
/*0610*/ ISETP.NE.AND P2, PT, R8, 0x2, PT ; /* 0x000000020800780c */
/* 0x000fe40003f45270 */
/*0620*/ ISETP.GE.OR P1, PT, R12, c[0x0][0x17c], P1 ; /* 0x00005f000c007a0c */
/* 0x000fda0000f26670 */
/*0630*/ @P1 BRA 0x690 ; /* 0x0000005000001947 */
/* 0x000fea0003800000 */
/*0640*/ LDG.E R12, [R2.64+0x4] ; /* 0x00000406020c7981 */
/* 0x000ee8000c1e1900 */
/*0650*/ LDG.E R15, [R4.64+-0x4] ; /* 0xfffffc06040f7981 */
/* 0x000ee2000c1e1900 */
/*0660*/ I2F R6, R6 ; /* 0x0000000600067306 */
/* 0x004ee40000201400 */
/*0670*/ FFMA R12, R12, R15, R6 ; /* 0x0000000f0c0c7223 */
/* 0x008fcc0000000006 */
/*0680*/ F2I.TRUNC.NTZ R6, R12 ; /* 0x0000000c00067305 */
/* 0x0002a4000020f100 */
/*0690*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*06a0*/ @!P2 BRA 0x770 ; /* 0x000000c00000a947 */
/* 0x000fea0003800000 */
/*06b0*/ IADD3 R13, R13, 0x2, RZ ; /* 0x000000020d0d7810 */
/* 0x000fe20007ffe0ff */
/*06c0*/ BSSY B0, 0x770 ; /* 0x000000a000007945 */
/* 0x000fe60003800000 */
/*06d0*/ ISETP.LT.OR P0, PT, R0.reuse, R13, !P0 ; /* 0x0000000d0000720c */
/* 0x040fe20004701670 */
/*06e0*/ IMAD.IADD R13, R0, 0x1, -R13 ; /* 0x00000001000d7824 */
/* 0x000fca00078e0a0d */
/*06f0*/ ISETP.GE.OR P0, PT, R13, c[0x0][0x17c], P0 ; /* 0x00005f000d007a0c */
/* 0x000fda0000706670 */
/*0700*/ @P0 BRA 0x760 ; /* 0x0000005000000947 */
/* 0x000fea0003800000 */
/*0710*/ LDG.E R2, [R2.64+0x8] ; /* 0x0000080602027981 */
/* 0x000ee8000c1e1900 */
/*0720*/ LDG.E R5, [R4.64+-0x8] ; /* 0xfffff80604057981 */
/* 0x000ee2000c1e1900 */
/*0730*/ I2F R12, R6 ; /* 0x00000006000c7306 */
/* 0x006ee40000201400 */
/*0740*/ FFMA R12, R2, R5, R12 ; /* 0x00000005020c7223 */
/* 0x008fcc000000000c */
/*0750*/ F2I.TRUNC.NTZ R6, R12 ; /* 0x0000000c00067305 */
/* 0x0002a4000020f100 */
/*0760*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0770*/ @!P5 BRA 0x140 ; /* 0xfffff9c00000d947 */
/* 0x000fea000383ffff */
/*0780*/ ULDC UR4, c[0x0][0xc] ; /* 0x0000030000047ab9 */
/* 0x000fe20000000800 */
/*0790*/ I2F R5, R6 ; /* 0x0000000600057306 */
/* 0x006e620000201400 */
/*07a0*/ ULDC UR5, c[0x0][0x0] ; /* 0x0000000000057ab9 */
/* 0x000fe20000000800 */
/*07b0*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*07c0*/ UIMAD UR4, UR4, UR5, URZ ; /* 0x00000005040472a4 */
/* 0x000fcc000f8e023f */
/*07d0*/ IMAD R2, R7, UR4, R0 ; /* 0x0000000407027c24 */
/* 0x000fc8000f8e0200 */
/*07e0*/ IMAD.WIDE R2, R2, R3, c[0x0][0x170] ; /* 0x00005c0002027625 */
/* 0x000fca00078e0203 */
/*07f0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x002fe2000c101906 */
/*0800*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0810*/ BRA 0x810; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0820*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0830*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0840*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0850*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0860*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0870*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0880*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0890*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z8convol2DPfS_S_iiii
.globl _Z8convol2DPfS_S_iiii
.p2align 8
.type _Z8convol2DPfS_S_iiii,@function
_Z8convol2DPfS_S_iiii:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x34
s_load_b32 s16, s[0:1], 0x20
s_load_b32 s12, s[0:1], 0x28
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s13, s2, 0xffff
s_lshr_b32 s2, s2, 16
s_delay_alu instid0(VALU_DEP_1)
v_mad_u64_u32 v[0:1], null, s14, s13, v[2:3]
v_mad_u64_u32 v[1:2], null, s15, s2, v[3:4]
s_cmp_lt_i32 s16, 1
s_cbranch_scc1 .LBB0_9
s_clause 0x2
s_load_b32 s14, s[0:1], 0x24
s_load_b64 s[8:9], s[0:1], 0x18
s_load_b128 s[4:7], s[0:1], 0x0
v_mov_b32_e32 v3, 0
s_mov_b32 s11, 0
s_mov_b32 s17, 0
s_mov_b32 s18, 0
s_waitcnt lgkmcnt(0)
s_cmp_gt_i32 s14, 0
v_mul_lo_u32 v2, s9, v1
s_cselect_b32 s15, -1, 0
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_3
.p2align 6
.LBB0_2:
s_delay_alu instid0(VALU_DEP_1)
v_subrev_nc_u32_e32 v2, s9, v2
s_add_i32 s18, s18, 1
s_add_i32 s17, s17, s14
s_cmp_eq_u32 s18, s16
s_cbranch_scc1 .LBB0_8
.LBB0_3:
s_and_not1_b32 vcc_lo, exec_lo, s15
s_cbranch_vccnz .LBB0_2
v_subrev_nc_u32_e32 v4, s18, v1
v_mov_b32_e32 v5, v0
s_mov_b32 s10, s17
s_mov_b32 s19, s14
s_delay_alu instid0(VALU_DEP_2)
v_cmp_gt_i32_e32 vcc_lo, s8, v4
s_branch .LBB0_6
.p2align 6
.LBB0_5:
s_or_b32 exec_lo, exec_lo, s3
v_add_nc_u32_e32 v5, -1, v5
s_add_i32 s19, s19, -1
s_add_i32 s10, s10, 1
s_cmp_eq_u32 s19, 0
s_cbranch_scc1 .LBB0_2
.LBB0_6:
v_or_b32_e32 v6, v5, v4
v_cmp_gt_i32_e64 s3, s9, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_lt_i32_e64 s2, -1, v6
s_and_b32 s2, vcc_lo, s2
s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
s_and_b32 s2, s3, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_5
v_add_nc_u32_e32 v6, v2, v5
s_lshl_b64 s[20:21], s[10:11], 2
v_cvt_f32_i32_e32 v3, v3
s_add_u32 s20, s6, s20
s_addc_u32 s21, s7, s21
v_ashrrev_i32_e32 v7, 31, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[6:7], 2, v[6:7]
v_add_co_u32 v6, s2, s4, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v7, s2, s5, v7, s2
s_load_b32 s2, s[20:21], 0x0
global_load_b32 v6, v[6:7], off
s_waitcnt vmcnt(0) lgkmcnt(0)
v_fmac_f32_e32 v3, s2, v6
v_cvt_i32_f32_e32 v3, v3
s_branch .LBB0_5
.LBB0_8:
s_set_inst_prefetch_distance 0x2
v_cvt_f32_i32_e32 v2, v3
s_branch .LBB0_10
.LBB0_9:
v_mov_b32_e32 v2, 0
.LBB0_10:
s_mul_i32 s12, s12, s13
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[3:4], null, s12, v1, v[0:1]
v_ashrrev_i32_e32 v4, 31, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[3:4]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v2, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z8convol2DPfS_S_iiii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 22
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z8convol2DPfS_S_iiii, .Lfunc_end0-_Z8convol2DPfS_S_iiii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 36
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z8convol2DPfS_S_iiii
.private_segment_fixed_size: 0
.sgpr_count: 24
.sgpr_spill_count: 0
.symbol: _Z8convol2DPfS_S_iiii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00022709_00000000-6_2dconvol.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z35__device_stub__Z8convol2DPfS_S_iiiiPfS_S_iiii
.type _Z35__device_stub__Z8convol2DPfS_S_iiiiPfS_S_iiii, @function
_Z35__device_stub__Z8convol2DPfS_S_iiiiPfS_S_iiii:
.LFB2082:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
leaq 192(%rsp), %rax
movq %rax, 160(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z8convol2DPfS_S_iiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z35__device_stub__Z8convol2DPfS_S_iiiiPfS_S_iiii, .-_Z35__device_stub__Z8convol2DPfS_S_iiiiPfS_S_iiii
.globl _Z8convol2DPfS_S_iiii
.type _Z8convol2DPfS_S_iiii, @function
_Z8convol2DPfS_S_iiii:
.LFB2083:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z35__device_stub__Z8convol2DPfS_S_iiiiPfS_S_iiii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z8convol2DPfS_S_iiii, .-_Z8convol2DPfS_S_iiii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "Incorrect arguments passed.\nUse \"./2dconvol.o <input_file>\"\n"
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "r"
.LC2:
.string "Size of A: %dx%d\n"
.LC3:
.string "Size of H: %dx%d\n"
.LC4:
.string "Size of C: %dx%d\n"
.LC5:
.string "Size of grid: %dx%d\n"
.LC6:
.string "Size of block: %dx%d\n"
.LC7:
.string "%f"
.LC8:
.string "%f "
.LC9:
.string "\n"
.section .rodata.str1.8
.align 8
.LC10:
.string "Completed Loading Matrices...\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $200, %rsp
.cfi_def_cfa_offset 256
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
cmpl $2, %edi
jne .L66
movq $0, 136(%rsp)
movq $0, 144(%rsp)
movq $0, 152(%rsp)
movl $1, 168(%rsp)
movl $1, 180(%rsp)
movq 8(%rsi), %rdi
leaq .LC1(%rip), %rsi
call fopen@PLT
movq %rax, %rbx
movq %rax, %rdi
call fgetc@PLT
movl %eax, %ebp
movl $0, %r12d
movl $1, %r13d
jmp .L13
.L66:
leaq .LC0(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L67:
addl $1, %r12d
jmp .L15
.L55:
movl %eax, %ebp
.L13:
cmpb $-1, %bpl
je .L16
cmpb $10, %bpl
je .L67
cmpb $32, %bpl
jne .L15
testl %r12d, %r12d
sete %al
cmpb $1, %al
sbbl $-1, %r13d
.L15:
movq %rbx, %rdi
call fgetc@PLT
cmpb $10, %al
jne .L55
cmpb %al, %bpl
jne .L55
.L16:
movl %r13d, 48(%rsp)
movl %r12d, 16(%rsp)
movq %rbx, %rdi
call fgetc@PLT
movl %eax, %edx
cmpb $-1, %al
je .L54
movl $0, %ebp
movl $1, %r12d
jmp .L22
.L69:
addl $1, %ebp
.L21:
movq %rbx, %rdi
call fgetc@PLT
movl %eax, %edx
cmpb $-1, %al
je .L68
.L22:
cmpb $10, %dl
je .L69
cmpb $32, %dl
jne .L21
testl %ebp, %ebp
sete %al
cmpb $1, %al
sbbl $-1, %r12d
jmp .L21
.L68:
movl %r12d, 4(%rsp)
movl %ebp, 8(%rsp)
.L19:
movl 16(%rsp), %r14d
movl 8(%rsp), %r13d
leal (%r14,%r13), %esi
movl %esi, 104(%rsp)
leal -1(%rsi), %ebp
movl $32, %eax
cmpl %eax, %ebp
cmovle %ebp, %eax
movl %eax, 64(%rsp)
movl 48(%rsp), %r12d
movl 4(%rsp), %ecx
leal (%r12,%rcx), %r10d
movl %r10d, 108(%rsp)
leal -1(%r10), %r15d
movl %r15d, 120(%rsp)
movl $16, %eax
cmpl %eax, %r15d
cmovle %r15d, %eax
movl %eax, 68(%rsp)
leal 30(%rsi), %eax
testl %ebp, %ebp
cmovns %ebp, %eax
sarl $5, %eax
leal 1(%rax), %esi
movl %esi, 72(%rsp)
leal 14(%r10), %eax
testl %r15d, %r15d
cmovns %r15d, %eax
sarl $4, %eax
leal 1(%rax), %r11d
movl %r11d, 76(%rsp)
movl %r12d, %ecx
movl %r14d, %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 4(%rsp), %ecx
movl %r13d, %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %r15d, %ecx
movl %ebp, %edx
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 76(%rsp), %ecx
movl 72(%rsp), %edx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 68(%rsp), %ecx
movl 64(%rsp), %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %r14d, %eax
imull %r12d, %eax
cltq
leaq 0(,%rax,4), %rdx
movq %rdx, 88(%rsp)
movl 4(%rsp), %ecx
imull %ecx, %r13d
movl %r13d, %eax
cltq
leaq 0(,%rax,4), %r13
movq %r13, 96(%rsp)
movl %r15d, %eax
imull %ebp, %eax
cltq
leaq 0(,%rax,4), %r15
movq %r15, 80(%rsp)
movq %rdx, %rdi
call malloc@PLT
movq %rax, 24(%rsp)
movq %r13, %rdi
call malloc@PLT
movq %rax, 32(%rsp)
movq %r15, %rdi
call malloc@PLT
movq %rax, 40(%rsp)
movq %rbx, %rdi
call rewind@PLT
testl %r14d, %r14d
jle .L23
movl %r12d, 52(%rsp)
movl $0, %r14d
movl $0, %r15d
leaq .LC7(%rip), %r13
movl %ebp, 56(%rsp)
jmp .L24
.L54:
movl $0, 8(%rsp)
movl $1, 4(%rsp)
jmp .L19
.L27:
movslq %r15d, %rsi
movq 24(%rsp), %rcx
leaq (%rcx,%rsi,4), %rbp
movslq 48(%rsp), %rax
addq %rsi, %rax
leaq (%rcx,%rax,4), %r12
.L25:
movq %rbp, %rdx
movq %r13, %rsi
movq %rbx, %rdi
movl $0, %eax
call __isoc23_fscanf@PLT
addq $4, %rbp
cmpq %r12, %rbp
jne .L25
.L28:
addl $1, %r14d
movl 52(%rsp), %eax
addl %eax, %r15d
cmpl %r14d, 16(%rsp)
je .L26
.L24:
cmpl $0, 48(%rsp)
jg .L27
jmp .L28
.L32:
movslq %r15d, %rsi
movq 32(%rsp), %rcx
leaq (%rcx,%rsi,4), %rbp
movslq 4(%rsp), %rax
addq %rsi, %rax
leaq (%rcx,%rax,4), %r12
.L30:
movq %rbp, %rdx
movq %r13, %rsi
movq %rbx, %rdi
movl $0, %eax
call __isoc23_fscanf@PLT
addq $4, %rbp
cmpq %r12, %rbp
jne .L30
.L33:
addl $1, %r14d
movl 52(%rsp), %eax
addl %eax, %r15d
movl 8(%rsp), %eax
cmpl %eax, %r14d
je .L31
.L29:
cmpl $0, 4(%rsp)
jg .L32
jmp .L33
.L31:
movl 56(%rsp), %ebp
cmpl $0, 16(%rsp)
jle .L34
.L52:
movl 48(%rsp), %eax
movl %eax, 52(%rsp)
movl $0, %r15d
movl $0, %r14d
cltq
movq %rax, 56(%rsp)
leaq .LC8(%rip), %r13
movq %rbx, 112(%rsp)
movl %ebp, 124(%rsp)
movq 24(%rsp), %r12
jmp .L35
.L38:
movslq %r15d, %rax
leaq (%r12,%rax,4), %rbx
movq 56(%rsp), %rcx
addq %rcx, %rax
leaq (%r12,%rax,4), %rbp
.L36:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %r13, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %rbp, %rbx
jne .L36
.L39:
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %r14d
movl 52(%rsp), %eax
addl %eax, %r15d
cmpl %r14d, 16(%rsp)
jle .L37
.L35:
cmpl $0, 48(%rsp)
jg .L38
jmp .L39
.L37:
movq 112(%rsp), %rbx
movl 124(%rsp), %ebp
cmpl $0, 8(%rsp)
jle .L40
.L34:
movl 4(%rsp), %eax
movl %eax, 52(%rsp)
movl $0, %r15d
movl $0, %r14d
cltq
movq %rax, 56(%rsp)
leaq .LC8(%rip), %r13
movq %rbx, 112(%rsp)
movl %ebp, 124(%rsp)
movq 32(%rsp), %r12
jmp .L41
.L43:
movslq %r15d, %rax
leaq (%r12,%rax,4), %rbx
movq 56(%rsp), %rcx
addq %rcx, %rax
leaq (%r12,%rax,4), %rbp
.L42:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %r13, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %rbp, %rbx
jne .L42
.L44:
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %r14d
movl 52(%rsp), %eax
addl %eax, %r15d
movl 8(%rsp), %eax
cmpl %eax, %r14d
je .L63
.L41:
cmpl $0, 4(%rsp)
jg .L43
jmp .L44
.L63:
movq 112(%rsp), %rbx
movl 124(%rsp), %ebp
.L40:
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 136(%rsp), %rdi
movq 88(%rsp), %r13
movq %r13, %rsi
call cudaMalloc@PLT
leaq 144(%rsp), %rdi
movq 96(%rsp), %r15
movq %r15, %rsi
call cudaMalloc@PLT
leaq 152(%rsp), %rdi
movq 80(%rsp), %r14
movq %r14, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %r13, %rdx
movq 24(%rsp), %rsi
movq 136(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %r15, %rdx
movq 32(%rsp), %rsi
movq 144(%rsp), %rdi
call cudaMemcpy@PLT
movq %r14, %rdx
movl $0, %esi
movq 152(%rsp), %rdi
call cudaMemset@PLT
movl 76(%rsp), %eax
movl %eax, 172(%rsp)
movl 72(%rsp), %eax
movl %eax, 176(%rsp)
movl 68(%rsp), %eax
movl %eax, 160(%rsp)
movl 64(%rsp), %eax
movl %eax, 164(%rsp)
movl 168(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 160(%rsp), %rdx
movq 172(%rsp), %rdi
movl 180(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L70
.L45:
call cudaThreadSynchronize@PLT
movl $2, %ecx
movq 80(%rsp), %rdx
movq 152(%rsp), %rsi
movq 40(%rsp), %r12
movq %r12, %rdi
call cudaMemcpy@PLT
testl %ebp, %ebp
jle .L46
movl 104(%rsp), %r15d
subl $1, %r15d
movl $0, %r14d
movl $0, %r13d
movl 108(%rsp), %eax
subl $2, %eax
movl %eax, 48(%rsp)
leaq 4(%r12), %rax
movq %rax, 8(%rsp)
leaq .LC8(%rip), %r12
movq %rbx, 16(%rsp)
movl %r15d, 4(%rsp)
movl 120(%rsp), %r15d
jmp .L47
.L70:
subq $8, %rsp
.cfi_def_cfa_offset 264
movl 12(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 272
movl 24(%rsp), %r9d
movl 64(%rsp), %r8d
movl 32(%rsp), %ecx
movq 168(%rsp), %rdx
movq 160(%rsp), %rsi
movq 152(%rsp), %rdi
call _Z35__device_stub__Z8convol2DPfS_S_iiiiPfS_S_iiii
addq $16, %rsp
.cfi_def_cfa_offset 256
jmp .L45
.L49:
movslq %r14d, %rdx
movq 40(%rsp), %rax
leaq (%rax,%rdx,4), %rbx
movl 48(%rsp), %eax
addq %rdx, %rax
movq 8(%rsp), %rcx
leaq (%rcx,%rax,4), %rbp
.L48:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %r12, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %rbp, %rbx
jne .L48
.L50:
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %r13d
addl %r15d, %r14d
movl 4(%rsp), %eax
cmpl %eax, %r13d
je .L64
.L47:
testl %r15d, %r15d
jg .L49
jmp .L50
.L64:
movq 16(%rsp), %rbx
.L46:
movq 136(%rsp), %rdi
call cudaFree@PLT
movq 144(%rsp), %rdi
call cudaFree@PLT
movq 152(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call free@PLT
movq 32(%rsp), %rdi
call free@PLT
movq 40(%rsp), %rdi
call free@PLT
movq %rbx, %rdi
call fclose@PLT
movq 184(%rsp), %rax
subq %fs:40, %rax
jne .L71
movl $0, %eax
addq $200, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L26:
.cfi_restore_state
movl 56(%rsp), %ebp
cmpl $0, 8(%rsp)
jle .L52
.L51:
movl 4(%rsp), %eax
movl %eax, 52(%rsp)
movl $0, %r14d
movl $0, %r15d
leaq .LC7(%rip), %r13
movl %ebp, 56(%rsp)
jmp .L29
.L23:
cmpl $0, 8(%rsp)
jg .L51
jmp .L40
.L71:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC11:
.string "_Z8convol2DPfS_S_iiii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC11(%rip), %rdx
movq %rdx, %rcx
leaq _Z8convol2DPfS_S_iiii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "2dconvol.hip"
.globl _Z23__device_stub__convol2DPfS_S_iiii # -- Begin function _Z23__device_stub__convol2DPfS_S_iiii
.p2align 4, 0x90
.type _Z23__device_stub__convol2DPfS_S_iiii,@function
_Z23__device_stub__convol2DPfS_S_iiii: # @_Z23__device_stub__convol2DPfS_S_iiii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 20(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 160(%rsp), %rax
movq %rax, 144(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z8convol2DPfS_S_iiii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z23__device_stub__convol2DPfS_S_iiii, .Lfunc_end0-_Z23__device_stub__convol2DPfS_S_iiii
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $328, %rsp # imm = 0x148
.cfi_def_cfa_offset 384
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
cmpl $2, %edi
jne .LBB1_46
# %bb.1:
movq $0, 72(%rsp)
movq $0, 64(%rsp)
movq $0, 24(%rsp)
movq 8(%rsi), %rdi
movl $.L.str.1, %esi
callq fopen
movq %rax, %rbx
movq %rax, %rdi
callq fgetc
movl %eax, %ebp
xorl %r15d, %r15d
movl $0, %r13d
movl $1, %eax
movq %rax, 8(%rsp) # 8-byte Spill
cmpb $-1, %bpl
je .LBB1_8
# %bb.2: # %.preheader179.preheader
movl $1, %eax
movq %rax, 8(%rsp) # 8-byte Spill
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB1_3: # %.preheader179
# =>This Inner Loop Header: Depth=1
shll $24, %ebp
cmpl $167772160, %ebp # imm = 0xA000000
jne .LBB1_5
# %bb.4: # in Loop: Header=BB1_3 Depth=1
incl %r13d
jmp .LBB1_6
.p2align 4, 0x90
.LBB1_5: # in Loop: Header=BB1_3 Depth=1
movl %ebp, %eax
xorl $536870912, %eax # imm = 0x20000000
xorl %ecx, %ecx
orl %r13d, %eax
sete %cl
movq 8(%rsp), %rax # 8-byte Reload
addl %ecx, %eax
movq %rax, 8(%rsp) # 8-byte Spill
.LBB1_6: # in Loop: Header=BB1_3 Depth=1
movq %rbx, %rdi
callq fgetc
movl %eax, %edx
shll $24, %edx
movl %edx, %ecx
xorl $167772160, %ecx # imm = 0xA000000
xorl $167772160, %ebp # imm = 0xA000000
orl %ecx, %ebp
setne %cl
cmpl $-16777216, %edx # imm = 0xFF000000
je .LBB1_8
# %bb.7: # in Loop: Header=BB1_3 Depth=1
movl %eax, %ebp
testb %cl, %cl
jne .LBB1_3
.LBB1_8: # %.loopexit
movl $1, %r14d
jmp .LBB1_9
.p2align 4, 0x90
.LBB1_12: # in Loop: Header=BB1_9 Depth=1
xorl $536870912, %eax # imm = 0x20000000
xorl %ecx, %ecx
orl %r15d, %eax
sete %cl
addl %ecx, %r14d
.LBB1_9: # =>This Inner Loop Header: Depth=1
movq %rbx, %rdi
callq fgetc
shll $24, %eax
cmpl $-16777216, %eax # imm = 0xFF000000
je .LBB1_13
# %bb.10: # in Loop: Header=BB1_9 Depth=1
cmpl $167772160, %eax # imm = 0xA000000
jne .LBB1_12
# %bb.11: # in Loop: Header=BB1_9 Depth=1
incl %r15d
jmp .LBB1_9
.LBB1_13:
leal (%r15,%r13), %ecx
decl %ecx
movl %ecx, 20(%rsp) # 4-byte Spill
cmpl $32, %ecx
movl $32, %eax
cmovll %ecx, %eax
movq %rax, 40(%rsp) # 8-byte Spill
movq 8(%rsp), %r12 # 8-byte Reload
leal (%r12,%r14), %esi
decl %esi
movl %esi, 16(%rsp) # 4-byte Spill
movq %r12, %rax
cmpl $16, %esi
movl $16, %edx
cmovll %esi, %edx
movq %rdx, 128(%rsp) # 8-byte Spill
leal (%r15,%r13), %r12d
addl $30, %r12d
testl %ecx, %ecx
cmovnsl %ecx, %r12d
sarl $5, %r12d
incl %r12d
leal (%rax,%r14), %ebp
addl $14, %ebp
movq %rax, %rdx
movq %rax, 8(%rsp) # 8-byte Spill
testl %esi, %esi
cmovnsl %esi, %ebp
sarl $4, %ebp
incl %ebp
movl $.L.str.2, %edi
movl %r13d, %esi
# kill: def $edx killed $edx killed $rdx
xorl %eax, %eax
callq printf
movl $.L.str.3, %edi
movl %r15d, %esi
movl %r14d, %edx
xorl %eax, %eax
callq printf
movl $.L.str.4, %edi
movl 20(%rsp), %esi # 4-byte Reload
movl 16(%rsp), %edx # 4-byte Reload
xorl %eax, %eax
callq printf
movl $.L.str.5, %edi
movq %r12, 88(%rsp) # 8-byte Spill
movl %r12d, %esi
movq %rbp, 168(%rsp) # 8-byte Spill
movl %ebp, %edx
xorl %eax, %eax
callq printf
movl $.L.str.6, %edi
movq 40(%rsp), %rsi # 8-byte Reload
# kill: def $esi killed $esi killed $rsi
movq 128(%rsp), %rdx # 8-byte Reload
# kill: def $edx killed $edx killed $rdx
xorl %eax, %eax
callq printf
movq 8(%rsp), %r12 # 8-byte Reload
movl %r12d, %eax
imull %r13d, %eax
movslq %eax, %rdi
shlq $2, %rdi
movq %r14, 104(%rsp) # 8-byte Spill
movl %r14d, %eax
movq %r15, 80(%rsp) # 8-byte Spill
imull %r15d, %eax
movslq %eax, %r15
shlq $2, %r15
movl 16(%rsp), %eax # 4-byte Reload
imull 20(%rsp), %eax # 4-byte Folded Reload
movslq %eax, %r14
shlq $2, %r14
movq %rdi, 176(%rsp) # 8-byte Spill
callq malloc
movq %rax, 56(%rsp) # 8-byte Spill
movq %r15, 184(%rsp) # 8-byte Spill
movq %r15, %rdi
callq malloc
movq %rax, 48(%rsp) # 8-byte Spill
movq %r14, 192(%rsp) # 8-byte Spill
movq %r14, %rdi
callq malloc
movq %rax, 96(%rsp) # 8-byte Spill
movq %rbx, %rdi
callq rewind
movl %r13d, %eax
movq %rax, 152(%rsp) # 8-byte Spill
movl %r12d, %r15d
movq %r13, 136(%rsp) # 8-byte Spill
testl %r13d, %r13d
jle .LBB1_19
# %bb.14: # %.preheader178.lr.ph
xorl %ebp, %ebp
xorl %r14d, %r14d
jmp .LBB1_15
.p2align 4, 0x90
.LBB1_18: # %._crit_edge
# in Loop: Header=BB1_15 Depth=1
incq %r14
movq 8(%rsp), %r12 # 8-byte Reload
addl %r12d, %ebp
cmpq 152(%rsp), %r14 # 8-byte Folded Reload
je .LBB1_19
.LBB1_15: # %.preheader178
# =>This Loop Header: Depth=1
# Child Loop BB1_17 Depth 2
testl %r12d, %r12d
jle .LBB1_18
# %bb.16: # in Loop: Header=BB1_15 Depth=1
movl %ebp, %eax
movq 56(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %r13
movq %r15, %r12
.p2align 4, 0x90
.LBB1_17: # Parent Loop BB1_15 Depth=1
# => This Inner Loop Header: Depth=2
movl $.L.str.7, %esi
movq %rbx, %rdi
movq %r13, %rdx
xorl %eax, %eax
callq __isoc23_fscanf
addq $4, %r13
decq %r12
jne .LBB1_17
jmp .LBB1_18
.LBB1_19: # %.preheader177
movq %rbx, 144(%rsp) # 8-byte Spill
movq 80(%rsp), %rcx # 8-byte Reload
movl %ecx, %eax
movq %rax, 160(%rsp) # 8-byte Spill
movq 104(%rsp), %rax # 8-byte Reload
movl %eax, %ebx
testl %ecx, %ecx
je .LBB1_25
# %bb.20: # %.preheader176.lr.ph
movl $0, 36(%rsp) # 4-byte Folded Spill
xorl %r14d, %r14d
movq 144(%rsp), %r12 # 8-byte Reload
jmp .LBB1_21
.p2align 4, 0x90
.LBB1_24: # %._crit_edge184
# in Loop: Header=BB1_21 Depth=1
incq %r14
movq 104(%rsp), %rax # 8-byte Reload
addl %eax, 36(%rsp) # 4-byte Folded Spill
cmpq 160(%rsp), %r14 # 8-byte Folded Reload
je .LBB1_25
.LBB1_21: # %.preheader176
# =>This Loop Header: Depth=1
# Child Loop BB1_23 Depth 2
testl %eax, %eax
jle .LBB1_24
# %bb.22: # in Loop: Header=BB1_21 Depth=1
movl 36(%rsp), %eax # 4-byte Reload
movq 48(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %r13
movq %rbx, %rbp
.p2align 4, 0x90
.LBB1_23: # Parent Loop BB1_21 Depth=1
# => This Inner Loop Header: Depth=2
movl $.L.str.7, %esi
movq %r12, %rdi
movq %r13, %rdx
xorl %eax, %eax
callq __isoc23_fscanf
addq $4, %r13
decq %rbp
jne .LBB1_23
jmp .LBB1_24
.LBB1_25: # %.preheader175
shlq $32, 40(%rsp) # 8-byte Folded Spill
shlq $32, 88(%rsp) # 8-byte Folded Spill
cmpl $0, 136(%rsp) # 4-byte Folded Reload
movq 8(%rsp), %rax # 8-byte Reload
jle .LBB1_31
# %bb.26: # %.preheader174.lr.ph
xorl %r13d, %r13d
xorl %r12d, %r12d
jmp .LBB1_27
.p2align 4, 0x90
.LBB1_30: # %._crit_edge188
# in Loop: Header=BB1_27 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r12
movq 8(%rsp), %rax # 8-byte Reload
addl %eax, %r13d
cmpq 152(%rsp), %r12 # 8-byte Folded Reload
je .LBB1_31
.LBB1_27: # %.preheader174
# =>This Loop Header: Depth=1
# Child Loop BB1_29 Depth 2
testl %eax, %eax
jle .LBB1_30
# %bb.28: # %.lr.ph187
# in Loop: Header=BB1_27 Depth=1
movl %r13d, %eax
movq 56(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %r14
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB1_29: # Parent Loop BB1_27 Depth=1
# => This Inner Loop Header: Depth=2
movss (%r14,%rbp,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.8, %edi
movb $1, %al
callq printf
incq %rbp
cmpq %rbp, %r15
jne .LBB1_29
jmp .LBB1_30
.LBB1_31: # %.preheader173
movq 128(%rsp), %rax # 8-byte Reload
addq %rax, 40(%rsp) # 8-byte Folded Spill
movq 168(%rsp), %rax # 8-byte Reload
addq %rax, 88(%rsp) # 8-byte Folded Spill
cmpl $0, 80(%rsp) # 4-byte Folded Reload
movq 104(%rsp), %rbp # 8-byte Reload
je .LBB1_37
# %bb.32: # %.preheader172.lr.ph
xorl %r15d, %r15d
xorl %r14d, %r14d
jmp .LBB1_33
.p2align 4, 0x90
.LBB1_36: # %._crit_edge192
# in Loop: Header=BB1_33 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r14
addl %ebp, %r15d
cmpq 160(%rsp), %r14 # 8-byte Folded Reload
je .LBB1_37
.LBB1_33: # %.preheader172
# =>This Loop Header: Depth=1
# Child Loop BB1_35 Depth 2
testl %ebp, %ebp
jle .LBB1_36
# %bb.34: # %.lr.ph191
# in Loop: Header=BB1_33 Depth=1
movl %r15d, %eax
movq 48(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %r12
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB1_35: # Parent Loop BB1_33 Depth=1
# => This Inner Loop Header: Depth=2
movss (%r12,%r13,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.8, %edi
movb $1, %al
callq printf
incq %r13
cmpq %r13, %rbx
jne .LBB1_35
jmp .LBB1_36
.LBB1_37: # %._crit_edge194
movl $.Lstr, %edi
callq puts@PLT
leaq 72(%rsp), %rdi
movq 176(%rsp), %r15 # 8-byte Reload
movq %r15, %rsi
callq hipMalloc
leaq 64(%rsp), %rdi
movq 184(%rsp), %r14 # 8-byte Reload
movq %r14, %rsi
callq hipMalloc
leaq 24(%rsp), %rdi
movq 192(%rsp), %rbx # 8-byte Reload
movq %rbx, %rsi
callq hipMalloc
movq 72(%rsp), %rdi
movq 56(%rsp), %rsi # 8-byte Reload
movq %r15, %rdx
movl $1, %ecx
callq hipMemcpy
movq 64(%rsp), %rdi
movq 48(%rsp), %rsi # 8-byte Reload
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
xorl %esi, %esi
movq %rbx, %rdx
callq hipMemset
movq 88(%rsp), %rdi # 8-byte Reload
movl $1, %esi
movq 40(%rsp), %rdx # 8-byte Reload
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
movq 136(%rsp), %rsi # 8-byte Reload
jne .LBB1_39
# %bb.38:
movq 72(%rsp), %rax
movq 64(%rsp), %rcx
movq 24(%rsp), %rdx
movq %rax, 264(%rsp)
movq %rcx, 256(%rsp)
movq %rdx, 248(%rsp)
movl %esi, 124(%rsp)
movq 8(%rsp), %rax # 8-byte Reload
movl %eax, 120(%rsp)
movq 80(%rsp), %rax # 8-byte Reload
movl %eax, 116(%rsp)
movl %ebp, 112(%rsp)
leaq 264(%rsp), %rax
movq %rax, 272(%rsp)
leaq 256(%rsp), %rax
movq %rax, 280(%rsp)
leaq 248(%rsp), %rax
movq %rax, 288(%rsp)
leaq 124(%rsp), %rax
movq %rax, 296(%rsp)
leaq 120(%rsp), %rax
movq %rax, 304(%rsp)
leaq 116(%rsp), %rax
movq %rax, 312(%rsp)
leaq 112(%rsp), %rax
movq %rax, 320(%rsp)
leaq 232(%rsp), %rdi
leaq 216(%rsp), %rsi
leaq 208(%rsp), %rdx
leaq 200(%rsp), %rcx
callq __hipPopCallConfiguration
movq 232(%rsp), %rsi
movl 240(%rsp), %edx
movq 216(%rsp), %rcx
movl 224(%rsp), %r8d
leaq 272(%rsp), %r9
movl $_Z8convol2DPfS_S_iiii, %edi
pushq 200(%rsp)
.cfi_adjust_cfa_offset 8
pushq 216(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_39:
callq hipDeviceSynchronize
movq 24(%rsp), %rsi
movq 96(%rsp), %rdi # 8-byte Reload
movq %rbx, %rdx
movl $2, %ecx
callq hipMemcpy
movl 20(%rsp), %ecx # 4-byte Reload
testl %ecx, %ecx
movl 16(%rsp), %eax # 4-byte Reload
jle .LBB1_45
# %bb.40: # %.preheader.lr.ph
movl %ecx, %r14d
movl %eax, %r15d
xorl %r12d, %r12d
xorl %r13d, %r13d
jmp .LBB1_41
.p2align 4, 0x90
.LBB1_44: # %._crit_edge197
# in Loop: Header=BB1_41 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r13
movl 16(%rsp), %eax # 4-byte Reload
addl %eax, %r12d
cmpq %r14, %r13
je .LBB1_45
.LBB1_41: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB1_43 Depth 2
testl %eax, %eax
jle .LBB1_44
# %bb.42: # %.lr.ph196
# in Loop: Header=BB1_41 Depth=1
movl %r12d, %eax
movq 96(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %rbx
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB1_43: # Parent Loop BB1_41 Depth=1
# => This Inner Loop Header: Depth=2
movss (%rbx,%rbp,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.8, %edi
movb $1, %al
callq printf
incq %rbp
cmpq %rbp, %r15
jne .LBB1_43
jmp .LBB1_44
.LBB1_45: # %._crit_edge199
movq 72(%rsp), %rdi
callq hipFree
movq 64(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 56(%rsp), %rdi # 8-byte Reload
callq free
movq 48(%rsp), %rdi # 8-byte Reload
callq free
movq 96(%rsp), %rdi # 8-byte Reload
callq free
movq 144(%rsp), %rdi # 8-byte Reload
callq fclose
xorl %eax, %eax
addq $328, %rsp # imm = 0x148
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB1_46:
.cfi_def_cfa_offset 384
movq stderr(%rip), %rcx
movl $.L.str, %edi
movl $60, %esi
movl $1, %edx
callq fwrite@PLT
movl $1, %edi
callq exit
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8convol2DPfS_S_iiii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z8convol2DPfS_S_iiii,@object # @_Z8convol2DPfS_S_iiii
.section .rodata,"a",@progbits
.globl _Z8convol2DPfS_S_iiii
.p2align 3, 0x0
_Z8convol2DPfS_S_iiii:
.quad _Z23__device_stub__convol2DPfS_S_iiii
.size _Z8convol2DPfS_S_iiii, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Incorrect arguments passed.\nUse \"./2dconvol.o <input_file>\"\n"
.size .L.str, 61
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "r"
.size .L.str.1, 2
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Size of A: %dx%d\n"
.size .L.str.2, 18
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Size of H: %dx%d\n"
.size .L.str.3, 18
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Size of C: %dx%d\n"
.size .L.str.4, 18
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "Size of grid: %dx%d\n"
.size .L.str.5, 21
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "Size of block: %dx%d\n"
.size .L.str.6, 22
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "%f"
.size .L.str.7, 3
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "%f "
.size .L.str.8, 4
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z8convol2DPfS_S_iiii"
.size .L__unnamed_1, 22
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Completed Loading Matrices..."
.size .Lstr, 30
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z23__device_stub__convol2DPfS_S_iiii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z8convol2DPfS_S_iiii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
#define HISTOGRAM_LENGTH 256
__global__ void convertToGrayScale(unsigned char * ucharImg, unsigned char * grayImg, int width, int height)
{
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
int row = by*blockDim.y+ty;
int col = bx*blockDim.x+tx;
int index = row*width + col;
if(row < height && col < width)
{
grayImg[index] = (unsigned char) (0.21*ucharImg[index*3] + 0.71*ucharImg[index*3 + 1] + 0.07*ucharImg[index*3 + 2]);
}
} | code for sm_80
Function : _Z18convertToGrayScalePhS_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e280000002600 */
/*0020*/ S2R R3, SR_TID.Y ; /* 0x0000000000037919 */
/* 0x000e280000002200 */
/*0030*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e680000002500 */
/*0040*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e620000002100 */
/*0050*/ IMAD R0, R0, c[0x0][0x4], R3 ; /* 0x0000010000007a24 */
/* 0x001fca00078e0203 */
/*0060*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x174], PT ; /* 0x00005d0000007a0c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R3, R2, c[0x0][0x0], R5 ; /* 0x0000000002037a24 */
/* 0x002fca00078e0205 */
/*0080*/ ISETP.GE.OR P0, PT, R3, c[0x0][0x170], P0 ; /* 0x00005c0003007a0c */
/* 0x000fda0000706670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ IMAD R0, R0, c[0x0][0x170], R3 ; /* 0x00005c0000007a24 */
/* 0x000fe200078e0203 */
/*00b0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*00c0*/ IMAD R3, R0, 0x3, RZ ; /* 0x0000000300037824 */
/* 0x000fca00078e02ff */
/*00d0*/ IADD3 R2, P0, R3, c[0x0][0x160], RZ ; /* 0x0000580003027a10 */
/* 0x000fc80007f1e0ff */
/*00e0*/ LEA.HI.X.SX32 R3, R3, c[0x0][0x164], 0x1, P0 ; /* 0x0000590003037a11 */
/* 0x000fca00000f0eff */
/*00f0*/ LDG.E.U8 R11, [R2.64+0x1] ; /* 0x00000104020b7981 */
/* 0x000ea8000c1e1100 */
/*0100*/ LDG.E.U8 R10, [R2.64] ; /* 0x00000004020a7981 */
/* 0x000ee8000c1e1100 */
/*0110*/ LDG.E.U8 R8, [R2.64+0x2] ; /* 0x0000020402087981 */
/* 0x000f22000c1e1100 */
/*0120*/ I2F.F64.U16 R6, R11 ; /* 0x0000000b00067312 */
/* 0x004e300000101800 */
/*0130*/ I2F.F64.U16 R4, R10 ; /* 0x0000000a00047312 */
/* 0x008e700000101800 */
/*0140*/ I2F.F64.U16 R8, R8 ; /* 0x0000000800087312 */
/* 0x010ea20000101800 */
/*0150*/ DMUL R6, R6, c[0x2][0x0] ; /* 0x0080000006067a28 */
/* 0x001e4c0000000000 */
/*0160*/ DFMA R4, R4, c[0x2][0x8], R6 ; /* 0x0080020004047a2b */
/* 0x0020a40000000006 */
/*0170*/ IADD3 R6, P0, R0, c[0x0][0x168], RZ ; /* 0x00005a0000067a10 */
/* 0x001fc80007f1e0ff */
/*0180*/ LEA.HI.X.SX32 R7, R0, c[0x0][0x16c], 0x1, P0 ; /* 0x00005b0000077a11 */
/* 0x000fe200000f0eff */
/*0190*/ DFMA R4, R8, c[0x2][0x10], R4 ; /* 0x0080040008047a2b */
/* 0x004e140000000004 */
/*01a0*/ F2I.U32.F64.TRUNC R5, R4 ; /* 0x0000000400057311 */
/* 0x001e24000030d000 */
/*01b0*/ STG.E.U8 [R6.64], R5 ; /* 0x0000000506007986 */
/* 0x001fe2000c101104 */
/*01c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01d0*/ BRA 0x1d0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
#define HISTOGRAM_LENGTH 256
__global__ void convertToGrayScale(unsigned char * ucharImg, unsigned char * grayImg, int width, int height)
{
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
int row = by*blockDim.y+ty;
int col = bx*blockDim.x+tx;
int index = row*width + col;
if(row < height && col < width)
{
grayImg[index] = (unsigned char) (0.21*ucharImg[index*3] + 0.71*ucharImg[index*3 + 1] + 0.07*ucharImg[index*3 + 2]);
}
} | .file "tmpxft_00022720_00000000-6_convertToGrayScale.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z42__device_stub__Z18convertToGrayScalePhS_iiPhS_ii
.type _Z42__device_stub__Z18convertToGrayScalePhS_iiPhS_ii, @function
_Z42__device_stub__Z18convertToGrayScalePhS_iiPhS_ii:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z18convertToGrayScalePhS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z42__device_stub__Z18convertToGrayScalePhS_iiPhS_ii, .-_Z42__device_stub__Z18convertToGrayScalePhS_iiPhS_ii
.globl _Z18convertToGrayScalePhS_ii
.type _Z18convertToGrayScalePhS_ii, @function
_Z18convertToGrayScalePhS_ii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z42__device_stub__Z18convertToGrayScalePhS_iiPhS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z18convertToGrayScalePhS_ii, .-_Z18convertToGrayScalePhS_ii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z18convertToGrayScalePhS_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z18convertToGrayScalePhS_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
#define HISTOGRAM_LENGTH 256
__global__ void convertToGrayScale(unsigned char * ucharImg, unsigned char * grayImg, int width, int height)
{
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
int row = by*blockDim.y+ty;
int col = bx*blockDim.x+tx;
int index = row*width + col;
if(row < height && col < width)
{
grayImg[index] = (unsigned char) (0.21*ucharImg[index*3] + 0.71*ucharImg[index*3 + 1] + 0.07*ucharImg[index*3 + 2]);
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
#define HISTOGRAM_LENGTH 256
__global__ void convertToGrayScale(unsigned char * ucharImg, unsigned char * grayImg, int width, int height)
{
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
int row = by*blockDim.y+ty;
int col = bx*blockDim.x+tx;
int index = row*width + col;
if(row < height && col < width)
{
grayImg[index] = (unsigned char) (0.21*ucharImg[index*3] + 0.71*ucharImg[index*3 + 1] + 0.07*ucharImg[index*3 + 2]);
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
#define HISTOGRAM_LENGTH 256
__global__ void convertToGrayScale(unsigned char * ucharImg, unsigned char * grayImg, int width, int height)
{
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
int row = by*blockDim.y+ty;
int col = bx*blockDim.x+tx;
int index = row*width + col;
if(row < height && col < width)
{
grayImg[index] = (unsigned char) (0.21*ucharImg[index*3] + 0.71*ucharImg[index*3 + 1] + 0.07*ucharImg[index*3 + 2]);
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z18convertToGrayScalePhS_ii
.globl _Z18convertToGrayScalePhS_ii
.p2align 8
.type _Z18convertToGrayScalePhS_ii,@function
_Z18convertToGrayScalePhS_ii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b64 s[4:5], s[0:1], 0x10
v_bfe_u32 v2, v0, 10, 10
v_and_b32_e32 v3, 0x3ff, v0
s_waitcnt lgkmcnt(0)
s_lshr_b32 s3, s2, 16
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[0:1], null, s15, s3, v[2:3]
v_mad_u64_u32 v[1:2], null, s14, s2, v[3:4]
v_cmp_gt_i32_e32 vcc_lo, s5, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_gt_i32_e64 s2, s4, v1
s_and_b32 s2, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_2
s_load_b128 s[0:3], s[0:1], 0x0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[2:3], null, v0, s4, v[1:2]
v_lshl_add_u32 v0, v2, 1, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v1, 31, v0
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_mov_b32 s1, 0x3fe6b851
s_mov_b32 s0, 0xeb851eb8
s_clause 0x2
global_load_u8 v3, v[0:1], off offset:1
global_load_u8 v4, v[0:1], off
global_load_u8 v5, v[0:1], off offset:2
s_waitcnt vmcnt(2)
v_cvt_f64_u32_e32 v[0:1], v3
s_waitcnt vmcnt(1)
v_cvt_f64_u32_e32 v[3:4], v4
s_delay_alu instid0(VALU_DEP_2)
v_mul_f64 v[0:1], v[0:1], s[0:1]
s_mov_b32 s1, 0x3fcae147
s_mov_b32 s0, 0xae147ae1
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[0:1], v[3:4], s[0:1], v[0:1]
s_waitcnt vmcnt(0)
v_cvt_f64_u32_e32 v[3:4], v5
s_mov_b32 s1, 0x3fb1eb85
s_mov_b32 s0, 0x1eb851ec
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[0:1], v[3:4], s[0:1], v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_cvt_i32_f64_e32 v3, v[0:1]
v_ashrrev_i32_e32 v1, 31, v2
v_add_co_u32 v0, vcc_lo, s2, v2
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
global_store_b8 v[0:1], v3, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z18convertToGrayScalePhS_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z18convertToGrayScalePhS_ii, .Lfunc_end0-_Z18convertToGrayScalePhS_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z18convertToGrayScalePhS_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z18convertToGrayScalePhS_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
#define HISTOGRAM_LENGTH 256
__global__ void convertToGrayScale(unsigned char * ucharImg, unsigned char * grayImg, int width, int height)
{
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
int row = by*blockDim.y+ty;
int col = bx*blockDim.x+tx;
int index = row*width + col;
if(row < height && col < width)
{
grayImg[index] = (unsigned char) (0.21*ucharImg[index*3] + 0.71*ucharImg[index*3 + 1] + 0.07*ucharImg[index*3 + 2]);
}
} | .text
.file "convertToGrayScale.hip"
.globl _Z33__device_stub__convertToGrayScalePhS_ii # -- Begin function _Z33__device_stub__convertToGrayScalePhS_ii
.p2align 4, 0x90
.type _Z33__device_stub__convertToGrayScalePhS_ii,@function
_Z33__device_stub__convertToGrayScalePhS_ii: # @_Z33__device_stub__convertToGrayScalePhS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z18convertToGrayScalePhS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z33__device_stub__convertToGrayScalePhS_ii, .Lfunc_end0-_Z33__device_stub__convertToGrayScalePhS_ii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z18convertToGrayScalePhS_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z18convertToGrayScalePhS_ii,@object # @_Z18convertToGrayScalePhS_ii
.section .rodata,"a",@progbits
.globl _Z18convertToGrayScalePhS_ii
.p2align 3, 0x0
_Z18convertToGrayScalePhS_ii:
.quad _Z33__device_stub__convertToGrayScalePhS_ii
.size _Z18convertToGrayScalePhS_ii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z18convertToGrayScalePhS_ii"
.size .L__unnamed_1, 29
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z33__device_stub__convertToGrayScalePhS_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z18convertToGrayScalePhS_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z18convertToGrayScalePhS_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e280000002600 */
/*0020*/ S2R R3, SR_TID.Y ; /* 0x0000000000037919 */
/* 0x000e280000002200 */
/*0030*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e680000002500 */
/*0040*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e620000002100 */
/*0050*/ IMAD R0, R0, c[0x0][0x4], R3 ; /* 0x0000010000007a24 */
/* 0x001fca00078e0203 */
/*0060*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x174], PT ; /* 0x00005d0000007a0c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R3, R2, c[0x0][0x0], R5 ; /* 0x0000000002037a24 */
/* 0x002fca00078e0205 */
/*0080*/ ISETP.GE.OR P0, PT, R3, c[0x0][0x170], P0 ; /* 0x00005c0003007a0c */
/* 0x000fda0000706670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ IMAD R0, R0, c[0x0][0x170], R3 ; /* 0x00005c0000007a24 */
/* 0x000fe200078e0203 */
/*00b0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*00c0*/ IMAD R3, R0, 0x3, RZ ; /* 0x0000000300037824 */
/* 0x000fca00078e02ff */
/*00d0*/ IADD3 R2, P0, R3, c[0x0][0x160], RZ ; /* 0x0000580003027a10 */
/* 0x000fc80007f1e0ff */
/*00e0*/ LEA.HI.X.SX32 R3, R3, c[0x0][0x164], 0x1, P0 ; /* 0x0000590003037a11 */
/* 0x000fca00000f0eff */
/*00f0*/ LDG.E.U8 R11, [R2.64+0x1] ; /* 0x00000104020b7981 */
/* 0x000ea8000c1e1100 */
/*0100*/ LDG.E.U8 R10, [R2.64] ; /* 0x00000004020a7981 */
/* 0x000ee8000c1e1100 */
/*0110*/ LDG.E.U8 R8, [R2.64+0x2] ; /* 0x0000020402087981 */
/* 0x000f22000c1e1100 */
/*0120*/ I2F.F64.U16 R6, R11 ; /* 0x0000000b00067312 */
/* 0x004e300000101800 */
/*0130*/ I2F.F64.U16 R4, R10 ; /* 0x0000000a00047312 */
/* 0x008e700000101800 */
/*0140*/ I2F.F64.U16 R8, R8 ; /* 0x0000000800087312 */
/* 0x010ea20000101800 */
/*0150*/ DMUL R6, R6, c[0x2][0x0] ; /* 0x0080000006067a28 */
/* 0x001e4c0000000000 */
/*0160*/ DFMA R4, R4, c[0x2][0x8], R6 ; /* 0x0080020004047a2b */
/* 0x0020a40000000006 */
/*0170*/ IADD3 R6, P0, R0, c[0x0][0x168], RZ ; /* 0x00005a0000067a10 */
/* 0x001fc80007f1e0ff */
/*0180*/ LEA.HI.X.SX32 R7, R0, c[0x0][0x16c], 0x1, P0 ; /* 0x00005b0000077a11 */
/* 0x000fe200000f0eff */
/*0190*/ DFMA R4, R8, c[0x2][0x10], R4 ; /* 0x0080040008047a2b */
/* 0x004e140000000004 */
/*01a0*/ F2I.U32.F64.TRUNC R5, R4 ; /* 0x0000000400057311 */
/* 0x001e24000030d000 */
/*01b0*/ STG.E.U8 [R6.64], R5 ; /* 0x0000000506007986 */
/* 0x001fe2000c101104 */
/*01c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01d0*/ BRA 0x1d0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z18convertToGrayScalePhS_ii
.globl _Z18convertToGrayScalePhS_ii
.p2align 8
.type _Z18convertToGrayScalePhS_ii,@function
_Z18convertToGrayScalePhS_ii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b64 s[4:5], s[0:1], 0x10
v_bfe_u32 v2, v0, 10, 10
v_and_b32_e32 v3, 0x3ff, v0
s_waitcnt lgkmcnt(0)
s_lshr_b32 s3, s2, 16
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[0:1], null, s15, s3, v[2:3]
v_mad_u64_u32 v[1:2], null, s14, s2, v[3:4]
v_cmp_gt_i32_e32 vcc_lo, s5, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_gt_i32_e64 s2, s4, v1
s_and_b32 s2, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_2
s_load_b128 s[0:3], s[0:1], 0x0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[2:3], null, v0, s4, v[1:2]
v_lshl_add_u32 v0, v2, 1, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v1, 31, v0
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_mov_b32 s1, 0x3fe6b851
s_mov_b32 s0, 0xeb851eb8
s_clause 0x2
global_load_u8 v3, v[0:1], off offset:1
global_load_u8 v4, v[0:1], off
global_load_u8 v5, v[0:1], off offset:2
s_waitcnt vmcnt(2)
v_cvt_f64_u32_e32 v[0:1], v3
s_waitcnt vmcnt(1)
v_cvt_f64_u32_e32 v[3:4], v4
s_delay_alu instid0(VALU_DEP_2)
v_mul_f64 v[0:1], v[0:1], s[0:1]
s_mov_b32 s1, 0x3fcae147
s_mov_b32 s0, 0xae147ae1
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[0:1], v[3:4], s[0:1], v[0:1]
s_waitcnt vmcnt(0)
v_cvt_f64_u32_e32 v[3:4], v5
s_mov_b32 s1, 0x3fb1eb85
s_mov_b32 s0, 0x1eb851ec
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[0:1], v[3:4], s[0:1], v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_cvt_i32_f64_e32 v3, v[0:1]
v_ashrrev_i32_e32 v1, 31, v2
v_add_co_u32 v0, vcc_lo, s2, v2
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
global_store_b8 v[0:1], v3, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z18convertToGrayScalePhS_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z18convertToGrayScalePhS_ii, .Lfunc_end0-_Z18convertToGrayScalePhS_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z18convertToGrayScalePhS_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z18convertToGrayScalePhS_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00022720_00000000-6_convertToGrayScale.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z42__device_stub__Z18convertToGrayScalePhS_iiPhS_ii
.type _Z42__device_stub__Z18convertToGrayScalePhS_iiPhS_ii, @function
_Z42__device_stub__Z18convertToGrayScalePhS_iiPhS_ii:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z18convertToGrayScalePhS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z42__device_stub__Z18convertToGrayScalePhS_iiPhS_ii, .-_Z42__device_stub__Z18convertToGrayScalePhS_iiPhS_ii
.globl _Z18convertToGrayScalePhS_ii
.type _Z18convertToGrayScalePhS_ii, @function
_Z18convertToGrayScalePhS_ii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z42__device_stub__Z18convertToGrayScalePhS_iiPhS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z18convertToGrayScalePhS_ii, .-_Z18convertToGrayScalePhS_ii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z18convertToGrayScalePhS_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z18convertToGrayScalePhS_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "convertToGrayScale.hip"
.globl _Z33__device_stub__convertToGrayScalePhS_ii # -- Begin function _Z33__device_stub__convertToGrayScalePhS_ii
.p2align 4, 0x90
.type _Z33__device_stub__convertToGrayScalePhS_ii,@function
_Z33__device_stub__convertToGrayScalePhS_ii: # @_Z33__device_stub__convertToGrayScalePhS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z18convertToGrayScalePhS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z33__device_stub__convertToGrayScalePhS_ii, .Lfunc_end0-_Z33__device_stub__convertToGrayScalePhS_ii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z18convertToGrayScalePhS_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z18convertToGrayScalePhS_ii,@object # @_Z18convertToGrayScalePhS_ii
.section .rodata,"a",@progbits
.globl _Z18convertToGrayScalePhS_ii
.p2align 3, 0x0
_Z18convertToGrayScalePhS_ii:
.quad _Z33__device_stub__convertToGrayScalePhS_ii
.size _Z18convertToGrayScalePhS_ii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z18convertToGrayScalePhS_ii"
.size .L__unnamed_1, 29
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z33__device_stub__convertToGrayScalePhS_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z18convertToGrayScalePhS_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <cuda_runtime_api.h>
__global__ void clamp_kernel(
float *y,
int dim,
float clamp_lo,
float clamp_hi)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < dim) {
float y_i = y[idx];
if (y_i < clamp_lo) {
y[idx] = clamp_lo;
} else if (y_i > clamp_hi) {
y[idx] = clamp_hi;
}
}
}
extern "C" void neuralops_cuda_clamp(
float *y,
size_t dim,
float clamp_lo,
float clamp_hi,
cudaStream_t stream)
{
clamp_kernel<<<(dim+1024-1)/1024, 1024, 0, stream>>>(
y, dim, clamp_lo, clamp_hi);
} | code for sm_80
Function : _Z12clamp_kernelPfiff
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R2, SR_TID.X ; /* 0x0000000000027919 */
/* 0x000e280000002100 */
/*0020*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e240000002500 */
/*0030*/ IMAD R2, R3, c[0x0][0x0], R2 ; /* 0x0000000003027a24 */
/* 0x001fca00078e0202 */
/*0040*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x168], PT ; /* 0x00005a0002007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0080*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fca00078e0203 */
/*0090*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea4000c1e1900 */
/*00a0*/ FSETP.GEU.AND P0, PT, R0, c[0x0][0x16c], PT ; /* 0x00005b0000007a0b */
/* 0x004fda0003f0e000 */
/*00b0*/ @!P0 BRA 0x110 ; /* 0x0000005000008947 */
/* 0x000fea0003800000 */
/*00c0*/ FSETP.GT.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0b */
/* 0x000fda0003f04000 */
/*00d0*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*00e0*/ MOV R5, c[0x0][0x170] ; /* 0x00005c0000057a02 */
/* 0x000fca0000000f00 */
/*00f0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*0100*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0110*/ MOV R5, c[0x0][0x16c] ; /* 0x00005b0000057a02 */
/* 0x000fca0000000f00 */
/*0120*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*0130*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0140*/ BRA 0x140; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <cuda_runtime_api.h>
__global__ void clamp_kernel(
float *y,
int dim,
float clamp_lo,
float clamp_hi)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < dim) {
float y_i = y[idx];
if (y_i < clamp_lo) {
y[idx] = clamp_lo;
} else if (y_i > clamp_hi) {
y[idx] = clamp_hi;
}
}
}
extern "C" void neuralops_cuda_clamp(
float *y,
size_t dim,
float clamp_lo,
float clamp_hi,
cudaStream_t stream)
{
clamp_kernel<<<(dim+1024-1)/1024, 1024, 0, stream>>>(
y, dim, clamp_lo, clamp_hi);
} | .file "tmpxft_0013b916_00000000-6_clamp.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z35__device_stub__Z12clamp_kernelPfiffPfiff
.type _Z35__device_stub__Z12clamp_kernelPfiffPfiff, @function
_Z35__device_stub__Z12clamp_kernelPfiffPfiff:
.LFB2052:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movl %esi, 20(%rsp)
movss %xmm0, 16(%rsp)
movss %xmm1, 12(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 20(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 12(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z12clamp_kernelPfiff(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2052:
.size _Z35__device_stub__Z12clamp_kernelPfiffPfiff, .-_Z35__device_stub__Z12clamp_kernelPfiffPfiff
.globl _Z12clamp_kernelPfiff
.type _Z12clamp_kernelPfiff, @function
_Z12clamp_kernelPfiff:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z12clamp_kernelPfiffPfiff
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _Z12clamp_kernelPfiff, .-_Z12clamp_kernelPfiff
.globl neuralops_cuda_clamp
.type neuralops_cuda_clamp, @function
neuralops_cuda_clamp:
.LFB2027:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $56, %rsp
.cfi_def_cfa_offset 80
movq %rdi, %rbp
movq %rsi, %rbx
movss %xmm0, 8(%rsp)
movss %xmm1, 12(%rsp)
movq %rdx, %r9
movl $1024, 36(%rsp)
movl $1, 40(%rsp)
leaq 1023(%rsi), %rax
shrq $10, %rax
movl %eax, 24(%rsp)
movl $1, 28(%rsp)
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L14
.L11:
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L14:
.cfi_restore_state
movss 12(%rsp), %xmm1
movss 8(%rsp), %xmm0
movl %ebx, %esi
movq %rbp, %rdi
call _Z35__device_stub__Z12clamp_kernelPfiffPfiff
jmp .L11
.cfi_endproc
.LFE2027:
.size neuralops_cuda_clamp, .-neuralops_cuda_clamp
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z12clamp_kernelPfiff"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z12clamp_kernelPfiff(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <cuda_runtime_api.h>
__global__ void clamp_kernel(
float *y,
int dim,
float clamp_lo,
float clamp_hi)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < dim) {
float y_i = y[idx];
if (y_i < clamp_lo) {
y[idx] = clamp_lo;
} else if (y_i > clamp_hi) {
y[idx] = clamp_hi;
}
}
}
extern "C" void neuralops_cuda_clamp(
float *y,
size_t dim,
float clamp_lo,
float clamp_hi,
cudaStream_t stream)
{
clamp_kernel<<<(dim+1024-1)/1024, 1024, 0, stream>>>(
y, dim, clamp_lo, clamp_hi);
} | #include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
__global__ void clamp_kernel(
float *y,
int dim,
float clamp_lo,
float clamp_hi)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < dim) {
float y_i = y[idx];
if (y_i < clamp_lo) {
y[idx] = clamp_lo;
} else if (y_i > clamp_hi) {
y[idx] = clamp_hi;
}
}
}
extern "C" void neuralops_cuda_clamp(
float *y,
size_t dim,
float clamp_lo,
float clamp_hi,
hipStream_t stream)
{
clamp_kernel<<<(dim+1024-1)/1024, 1024, 0, stream>>>(
y, dim, clamp_lo, clamp_hi);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
__global__ void clamp_kernel(
float *y,
int dim,
float clamp_lo,
float clamp_hi)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < dim) {
float y_i = y[idx];
if (y_i < clamp_lo) {
y[idx] = clamp_lo;
} else if (y_i > clamp_hi) {
y[idx] = clamp_hi;
}
}
}
// Host-side launcher: clamps `dim` floats in device buffer `y` to
// [clamp_lo, clamp_hi] asynchronously on `stream`.
// Grid: ceil(dim / 1024) blocks of 1024 threads (1-D).
// NOTE(review): size_t `dim` is narrowed to the kernel's int parameter —
// assumes dim < 2^31; confirm with callers.
extern "C" void neuralops_cuda_clamp(
    float *y,
    size_t dim,
    float clamp_lo,
    float clamp_hi,
    hipStream_t stream)
{
  clamp_kernel<<<(dim+1024-1)/1024, 1024, 0, stream>>>(
      y, dim, clamp_lo, clamp_hi);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z12clamp_kernelPfiff
.globl _Z12clamp_kernelPfiff
.p2align 8
.type _Z12clamp_kernelPfiff,@function
_Z12clamp_kernelPfiff:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_5
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x0
s_load_b32 s4, s[0:1], 0xc
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
v_mov_b32_e32 v2, s4
s_mov_b32 s3, exec_lo
global_load_b32 v3, v[0:1], off
s_waitcnt vmcnt(0)
v_cmp_gt_f32_e64 s2, s4, v3
v_cmpx_ngt_f32_e32 s4, v3
s_cbranch_execz .LBB0_3
s_load_b32 s0, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
v_cmp_lt_f32_e32 vcc_lo, s0, v3
v_mov_b32_e32 v2, s0
s_and_not1_b32 s0, s2, exec_lo
s_and_b32 s1, vcc_lo, exec_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 s2, s0, s1
.LBB0_3:
s_or_b32 exec_lo, exec_lo, s3
s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
s_and_b32 exec_lo, exec_lo, s2
s_cbranch_execz .LBB0_5
global_store_b32 v[0:1], v2, off
.LBB0_5:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z12clamp_kernelPfiff
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z12clamp_kernelPfiff, .Lfunc_end0-_Z12clamp_kernelPfiff
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z12clamp_kernelPfiff
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z12clamp_kernelPfiff.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
// Device kernel: clip each of the `dim` floats in y to [clamp_lo, clamp_hi].
// Elements already inside the interval are left untouched (no store issued).
__global__ void clamp_kernel(
    float *y,
    int dim,
    float clamp_lo,
    float clamp_hi)
{
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= dim) {
        return;                      // thread has no element to process
    }
    const float v = y[gid];
    if (v < clamp_lo) {
        y[gid] = clamp_lo;           // clip upward to the lower bound
    } else if (v > clamp_hi) {
        y[gid] = clamp_hi;           // clip downward to the upper bound
    }
}
// C-linkage wrapper: launches clamp_kernel over `dim` floats on `stream`.
// Uses 1024-thread blocks; the grid size is the ceiling of dim / 1024 so
// every element is covered.
// NOTE(review): `dim` (size_t) is narrowed to the kernel's int parameter —
// presumably dim < 2^31 in practice; verify against callers.
extern "C" void neuralops_cuda_clamp(
    float *y,
    size_t dim,
    float clamp_lo,
    float clamp_hi,
    hipStream_t stream)
{
  clamp_kernel<<<(dim+1024-1)/1024, 1024, 0, stream>>>(
      y, dim, clamp_lo, clamp_hi);
} | .text
.file "clamp.hip"
.globl _Z27__device_stub__clamp_kernelPfiff # -- Begin function _Z27__device_stub__clamp_kernelPfiff
.p2align 4, 0x90
.type _Z27__device_stub__clamp_kernelPfiff,@function
_Z27__device_stub__clamp_kernelPfiff: # @_Z27__device_stub__clamp_kernelPfiff
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movl %esi, 20(%rsp)
movss %xmm0, 16(%rsp)
movss %xmm1, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 20(%rsp), %rax
movq %rax, 88(%rsp)
leaq 16(%rsp), %rax
movq %rax, 96(%rsp)
leaq 12(%rsp), %rax
movq %rax, 104(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z12clamp_kernelPfiff, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z27__device_stub__clamp_kernelPfiff, .Lfunc_end0-_Z27__device_stub__clamp_kernelPfiff
.cfi_endproc
# -- End function
.globl neuralops_cuda_clamp # -- Begin function neuralops_cuda_clamp
.p2align 4, 0x90
.type neuralops_cuda_clamp,@function
neuralops_cuda_clamp: # @neuralops_cuda_clamp
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $120, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movq %rdx, %r9
movss %xmm1, 8(%rsp) # 4-byte Spill
movss %xmm0, 4(%rsp) # 4-byte Spill
movq %rsi, %rbx
movq %rdi, %r14
leaq 1023(%rsi), %rax
shrq $10, %rax
movl %eax, %edi
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rdi
orq $1024, %rdx # imm = 0x400
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq %r14, 72(%rsp)
movl %ebx, 20(%rsp)
movss 4(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
movss %xmm0, 16(%rsp)
movss 8(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
movss %xmm0, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 20(%rsp), %rax
movq %rax, 88(%rsp)
leaq 16(%rsp), %rax
movq %rax, 96(%rsp)
leaq 12(%rsp), %rax
movq %rax, 104(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z12clamp_kernelPfiff, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
addq $120, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size neuralops_cuda_clamp, .Lfunc_end1-neuralops_cuda_clamp
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12clamp_kernelPfiff, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z12clamp_kernelPfiff,@object # @_Z12clamp_kernelPfiff
.section .rodata,"a",@progbits
.globl _Z12clamp_kernelPfiff
.p2align 3, 0x0
_Z12clamp_kernelPfiff:
.quad _Z27__device_stub__clamp_kernelPfiff
.size _Z12clamp_kernelPfiff, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z12clamp_kernelPfiff"
.size .L__unnamed_1, 22
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z27__device_stub__clamp_kernelPfiff
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z12clamp_kernelPfiff
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z12clamp_kernelPfiff
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R2, SR_TID.X ; /* 0x0000000000027919 */
/* 0x000e280000002100 */
/*0020*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e240000002500 */
/*0030*/ IMAD R2, R3, c[0x0][0x0], R2 ; /* 0x0000000003027a24 */
/* 0x001fca00078e0202 */
/*0040*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x168], PT ; /* 0x00005a0002007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0080*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fca00078e0203 */
/*0090*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea4000c1e1900 */
/*00a0*/ FSETP.GEU.AND P0, PT, R0, c[0x0][0x16c], PT ; /* 0x00005b0000007a0b */
/* 0x004fda0003f0e000 */
/*00b0*/ @!P0 BRA 0x110 ; /* 0x0000005000008947 */
/* 0x000fea0003800000 */
/*00c0*/ FSETP.GT.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0b */
/* 0x000fda0003f04000 */
/*00d0*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*00e0*/ MOV R5, c[0x0][0x170] ; /* 0x00005c0000057a02 */
/* 0x000fca0000000f00 */
/*00f0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*0100*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0110*/ MOV R5, c[0x0][0x16c] ; /* 0x00005b0000057a02 */
/* 0x000fca0000000f00 */
/*0120*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*0130*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0140*/ BRA 0x140; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z12clamp_kernelPfiff
.globl _Z12clamp_kernelPfiff
.p2align 8
.type _Z12clamp_kernelPfiff,@function
_Z12clamp_kernelPfiff:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_5
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x0
s_load_b32 s4, s[0:1], 0xc
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
v_mov_b32_e32 v2, s4
s_mov_b32 s3, exec_lo
global_load_b32 v3, v[0:1], off
s_waitcnt vmcnt(0)
v_cmp_gt_f32_e64 s2, s4, v3
v_cmpx_ngt_f32_e32 s4, v3
s_cbranch_execz .LBB0_3
s_load_b32 s0, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
v_cmp_lt_f32_e32 vcc_lo, s0, v3
v_mov_b32_e32 v2, s0
s_and_not1_b32 s0, s2, exec_lo
s_and_b32 s1, vcc_lo, exec_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 s2, s0, s1
.LBB0_3:
s_or_b32 exec_lo, exec_lo, s3
s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
s_and_b32 exec_lo, exec_lo, s2
s_cbranch_execz .LBB0_5
global_store_b32 v[0:1], v2, off
.LBB0_5:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z12clamp_kernelPfiff
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z12clamp_kernelPfiff, .Lfunc_end0-_Z12clamp_kernelPfiff
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z12clamp_kernelPfiff
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z12clamp_kernelPfiff.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0013b916_00000000-6_clamp.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z35__device_stub__Z12clamp_kernelPfiffPfiff
.type _Z35__device_stub__Z12clamp_kernelPfiffPfiff, @function
_Z35__device_stub__Z12clamp_kernelPfiffPfiff:
.LFB2052:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movl %esi, 20(%rsp)
movss %xmm0, 16(%rsp)
movss %xmm1, 12(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 20(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 12(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z12clamp_kernelPfiff(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2052:
.size _Z35__device_stub__Z12clamp_kernelPfiffPfiff, .-_Z35__device_stub__Z12clamp_kernelPfiffPfiff
.globl _Z12clamp_kernelPfiff
.type _Z12clamp_kernelPfiff, @function
_Z12clamp_kernelPfiff:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z12clamp_kernelPfiffPfiff
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _Z12clamp_kernelPfiff, .-_Z12clamp_kernelPfiff
.globl neuralops_cuda_clamp
.type neuralops_cuda_clamp, @function
neuralops_cuda_clamp:
.LFB2027:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $56, %rsp
.cfi_def_cfa_offset 80
movq %rdi, %rbp
movq %rsi, %rbx
movss %xmm0, 8(%rsp)
movss %xmm1, 12(%rsp)
movq %rdx, %r9
movl $1024, 36(%rsp)
movl $1, 40(%rsp)
leaq 1023(%rsi), %rax
shrq $10, %rax
movl %eax, 24(%rsp)
movl $1, 28(%rsp)
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L14
.L11:
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L14:
.cfi_restore_state
movss 12(%rsp), %xmm1
movss 8(%rsp), %xmm0
movl %ebx, %esi
movq %rbp, %rdi
call _Z35__device_stub__Z12clamp_kernelPfiffPfiff
jmp .L11
.cfi_endproc
.LFE2027:
.size neuralops_cuda_clamp, .-neuralops_cuda_clamp
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z12clamp_kernelPfiff"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z12clamp_kernelPfiff(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "clamp.hip"
.globl _Z27__device_stub__clamp_kernelPfiff # -- Begin function _Z27__device_stub__clamp_kernelPfiff
.p2align 4, 0x90
.type _Z27__device_stub__clamp_kernelPfiff,@function
_Z27__device_stub__clamp_kernelPfiff: # @_Z27__device_stub__clamp_kernelPfiff
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movl %esi, 20(%rsp)
movss %xmm0, 16(%rsp)
movss %xmm1, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 20(%rsp), %rax
movq %rax, 88(%rsp)
leaq 16(%rsp), %rax
movq %rax, 96(%rsp)
leaq 12(%rsp), %rax
movq %rax, 104(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z12clamp_kernelPfiff, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z27__device_stub__clamp_kernelPfiff, .Lfunc_end0-_Z27__device_stub__clamp_kernelPfiff
.cfi_endproc
# -- End function
.globl neuralops_cuda_clamp # -- Begin function neuralops_cuda_clamp
.p2align 4, 0x90
.type neuralops_cuda_clamp,@function
neuralops_cuda_clamp: # @neuralops_cuda_clamp
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $120, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movq %rdx, %r9
movss %xmm1, 8(%rsp) # 4-byte Spill
movss %xmm0, 4(%rsp) # 4-byte Spill
movq %rsi, %rbx
movq %rdi, %r14
leaq 1023(%rsi), %rax
shrq $10, %rax
movl %eax, %edi
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rdi
orq $1024, %rdx # imm = 0x400
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq %r14, 72(%rsp)
movl %ebx, 20(%rsp)
movss 4(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
movss %xmm0, 16(%rsp)
movss 8(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
movss %xmm0, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 20(%rsp), %rax
movq %rax, 88(%rsp)
leaq 16(%rsp), %rax
movq %rax, 96(%rsp)
leaq 12(%rsp), %rax
movq %rax, 104(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z12clamp_kernelPfiff, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
addq $120, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size neuralops_cuda_clamp, .Lfunc_end1-neuralops_cuda_clamp
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12clamp_kernelPfiff, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z12clamp_kernelPfiff,@object # @_Z12clamp_kernelPfiff
.section .rodata,"a",@progbits
.globl _Z12clamp_kernelPfiff
.p2align 3, 0x0
_Z12clamp_kernelPfiff:
.quad _Z27__device_stub__clamp_kernelPfiff
.size _Z12clamp_kernelPfiff, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z12clamp_kernelPfiff"
.size .L__unnamed_1, 22
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z27__device_stub__clamp_kernelPfiff
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z12clamp_kernelPfiff
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /*
Jaitirth Jacob - 13CO125 Vidit Bhargava - 13CO151
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define ITERATIONS 4 //Repeat the experiment for greater accuracy
//Element-wise vector add: c[i] = a[i] + b[i], one thread per element.
//tpb is the threads-per-block count chosen at launch (equals blockDim.x).
//NOTE(review): there is no bounds check against the array length — main()
//rounds the grid up (numBlocks = ceil(N/tpb)), so when N is not a multiple
//of tpb the trailing threads read/write past the N-element arrays
//(out-of-bounds). Confirm and add an `if (i < n)` guard.
__global__ void add(int *a, int *b, int *c, int tpb)
{
    //Find the correct thread index in the grid
    int i = blockIdx.x * tpb + threadIdx.x;
    c[i] = a[i] + b[i];
}
#define N 1000000 //Array Size
#define min_threads 16
#define max_threads 1024
/*
 * Benchmarks a 1M-element vector add: for each threads-per-block setting
 * (16..1024, doubling => 7 configurations) it measures, with CUDA events,
 * the host->device copy time (t1), kernel time (t2) and device->host copy
 * time (t3), averages over ITERATIONS runs, and prints a timing table.
 *
 * Fix: `cudaEventCreate(©);` was HTML-entity mangling of the original
 * `cudaEventCreate(&copy);` ("&copy;" rendered as "©") and did not compile.
 */
int main(void)
{
    int *a,*b,*c;            //host arrays
    int *d_a, *d_b, *d_c;    //device arrays
    int size = N * sizeof(int);
    a = (int *)malloc(size);
    b = (int *)malloc(size);
    c = (int *)malloc(size);

    //Allocate on device
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);

    srand(time(NULL));

    //Populate a and b with small random values
    for (int i = 0; i < N; ++i)
    {
        a[i] = rand()%20;
        b[i] = rand()%37;
    }

    int numBlocks;
    cudaEvent_t start, copy, exec, result; //Events for measuring time

    //Accumulators: one slot per block-size configuration (16..1024 => 7)
    float t1[7], t2[7], t3[7], total[7];
    for (int i = 0; i < 7; ++i)
    {
        t1[i]=0;
        t2[i]=0;
        t3[i]=0;
        total[i]=0;
    }
    printf("t1: time for copying arrays\n");
    printf("t2: time for kernel execution\n");
    printf("t3: time for copying result back\n\n");
    printf("All times in milliseconds\n");
    printf("TPB\t\tNB\t\tt1\t\tt2\t\tt3\t\ttotal\t\n");

    int count;
    for (int i = 0; i < ITERATIONS; ++i)
    {
        count=0;
        for (int threadsPerBlock = min_threads; threadsPerBlock <= max_threads; threadsPerBlock*=2)
        {
            //Round the grid up so every element is covered
            numBlocks = (N + threadsPerBlock - 1)/threadsPerBlock;

            cudaEventCreate(&start);
            cudaEventCreate(&copy);   //fixed: was the mangled "©"
            cudaEventCreate(&exec);
            cudaEventCreate(&result);

            cudaEventRecord(start);
            //Copy inputs to device
            cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
            cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
            cudaEventRecord(copy);
            cudaEventSynchronize(copy);

            //Launch add() kernel on GPU
            add<<<numBlocks,threadsPerBlock>>>(d_a, d_b, d_c, threadsPerBlock);
            cudaEventRecord(exec);
            cudaEventSynchronize(exec);

            //Copy result back to host
            cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
            cudaEventRecord(result);
            cudaEventSynchronize(result);

            //Elapsed milliseconds between consecutive event pairs
            float temp1=0, temp2=0, temp3=0, temptotal;
            cudaEventElapsedTime(&temp1, start, copy);
            cudaEventElapsedTime(&temp2, copy, exec);
            cudaEventElapsedTime(&temp3, exec, result);
            cudaEventElapsedTime(&temptotal, start, result);

            t1[count] += temp1;
            t2[count] += temp2;
            t3[count] += temp3;
            total[count] += temptotal;

            cudaEventDestroy(start);
            cudaEventDestroy(copy);
            cudaEventDestroy(exec);
            cudaEventDestroy(result);
            count++;
        }
    }

    //Average over the iterations and print one row per configuration
    int threadsPerBlock = min_threads;
    for (int i = 0; i < 7; ++i)
    {
        numBlocks = (N + threadsPerBlock - 1)/threadsPerBlock;
        t1[i]/=(float)ITERATIONS;
        t2[i]/=(float)ITERATIONS;
        t3[i]/=(float)ITERATIONS;
        total[i]/=(float)ITERATIONS;
        printf("%d\t\t%d\t\t%.5f\t\t%.5f\t\t%.5f\t\t%.5f\t\t\n",
            threadsPerBlock, numBlocks, t1[i], t2[i], t3[i], total[i]);
        threadsPerBlock*=2;
    }

    //Cleanup
    free(a); free(b); free(c);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    return 0;
} | code for sm_80
Function : _Z3addPiS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R6, R6, c[0x0][0x178], R3 ; /* 0x00005e0006067a24 */
/* 0x001fca00078e0203 */
/*0060*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x000fc800078e0207 */
/*0070*/ IMAD.WIDE R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0207 */
/*0080*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0090*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*00a0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0207 */
/*00b0*/ IADD3 R9, R2, R5, RZ ; /* 0x0000000502097210 */
/* 0x004fca0007ffe0ff */
/*00c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00e0*/ BRA 0xe0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /*
Jaitirth Jacob - 13CO125 Vidit Bhargava - 13CO151
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define ITERATIONS 4 //Repeat the experiment for greater accuracy
// Element-wise vector addition on the device: c[idx] = a[idx] + b[idx].
// `tpb` is the launch's threads-per-block, passed explicitly and used in
// place of blockDim.x when flattening the grid index.
// NOTE(review): there is no `idx < n` bounds guard. The host side launches
// ceil(N / tpb) blocks, so whenever N is not a multiple of tpb the tail
// threads read and write past the end of the arrays — confirm array
// padding or add a length parameter plus a guard.
__global__ void add(int *a, int *b, int *c, int tpb)
{
	int idx = threadIdx.x + tpb * blockIdx.x;
	int sum = a[idx] + b[idx];
	c[idx] = sum;
}
#define N 1000000 //Array Size
#define min_threads 16
#define max_threads 1024
// Prints a CUDA runtime error with context and aborts, so failures are
// never silently ignored (every CUDA call returns a cudaError_t).
static void checkCuda(cudaError_t err, const char *what)
{
	if (err != cudaSuccess)
	{
		fprintf(stderr, "%s failed: %s\n", what, cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}
}

/*
 * Benchmark harness: for every threads-per-block value from min_threads
 * to max_threads (powers of two, 7 configurations) it times three phases
 * of an N-element integer vector addition —
 *   t1: host-to-device copy of the inputs
 *   t2: kernel execution
 *   t3: device-to-host copy of the result
 * — averaged over ITERATIONS repetitions, then prints one table row per
 * configuration. Returns 0 on success, exits non-zero on any failure.
 */
int main(void)
{
	int *a, *b, *c;        // host input/output buffers
	int *d_a, *d_b, *d_c;  // device mirrors of a, b, c
	int size = N * sizeof(int);

	a = (int *)malloc(size);
	b = (int *)malloc(size);
	c = (int *)malloc(size);
	if (a == NULL || b == NULL || c == NULL)
	{
		fprintf(stderr, "host allocation failed\n");
		exit(EXIT_FAILURE);
	}

	// Allocate on device
	checkCuda(cudaMalloc((void **)&d_a, size), "cudaMalloc d_a");
	checkCuda(cudaMalloc((void **)&d_b, size), "cudaMalloc d_b");
	checkCuda(cudaMalloc((void **)&d_c, size), "cudaMalloc d_c");

	srand(time(NULL));
	// Populate a and b with small random values
	for (int i = 0; i < N; ++i)
	{
		a[i] = rand() % 20;
		b[i] = rand() % 37;
	}

	int numBlocks;
	cudaEvent_t start, copy, exec, result; // events bracketing each timed phase
	// Per-configuration accumulators for averaging over ITERATIONS:
	// slot i corresponds to threadsPerBlock = 16 << i (16 .. 1024).
	float t1[7], t2[7], t3[7], total[7];
	for (int i = 0; i < 7; ++i)
	{
		t1[i] = 0;
		t2[i] = 0;
		t3[i] = 0;
		total[i] = 0;
	}

	printf("t1: time for copying arrays\n");
	printf("t2: time for kernel execution\n");
	printf("t3: time for copying result back\n\n");
	printf("All times in milliseconds\n");
	printf("TPB\t\tNB\t\tt1\t\tt2\t\tt3\t\ttotal\t\n");

	int count;
	for (int i = 0; i < ITERATIONS; ++i)
	{
		count = 0;
		for (int threadsPerBlock = min_threads; threadsPerBlock <= max_threads; threadsPerBlock *= 2)
		{
			// Ceil-division so every element gets a thread.
			// NOTE(review): when N is not a multiple of threadsPerBlock the
			// grid overshoots N and add() has no bounds guard — the tail
			// threads access past the arrays; fix belongs in the kernel.
			numBlocks = (N + threadsPerBlock - 1) / threadsPerBlock;

			checkCuda(cudaEventCreate(&start), "cudaEventCreate start");
			// FIX: original read "cudaEventCreate(©)" — mojibake for "&copy;"
			checkCuda(cudaEventCreate(&copy), "cudaEventCreate copy");
			checkCuda(cudaEventCreate(&exec), "cudaEventCreate exec");
			checkCuda(cudaEventCreate(&result), "cudaEventCreate result");

			checkCuda(cudaEventRecord(start), "cudaEventRecord start");
			// Copy inputs to device
			checkCuda(cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice), "cudaMemcpy a");
			checkCuda(cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice), "cudaMemcpy b");
			checkCuda(cudaEventRecord(copy), "cudaEventRecord copy");
			checkCuda(cudaEventSynchronize(copy), "cudaEventSynchronize copy");

			// Launch add() kernel on GPU; launch-config errors only surface
			// through cudaGetLastError(), not through the launch itself.
			add<<<numBlocks, threadsPerBlock>>>(d_a, d_b, d_c, threadsPerBlock);
			checkCuda(cudaGetLastError(), "kernel launch");
			checkCuda(cudaEventRecord(exec), "cudaEventRecord exec");
			checkCuda(cudaEventSynchronize(exec), "cudaEventSynchronize exec");

			// Copy result back to host
			checkCuda(cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost), "cudaMemcpy c");
			checkCuda(cudaEventRecord(result), "cudaEventRecord result");
			checkCuda(cudaEventSynchronize(result), "cudaEventSynchronize result");

			// FIX: temptotal was previously uninitialized.
			float temp1 = 0, temp2 = 0, temp3 = 0, temptotal = 0;
			checkCuda(cudaEventElapsedTime(&temp1, start, copy), "elapsed t1");
			checkCuda(cudaEventElapsedTime(&temp2, copy, exec), "elapsed t2");
			checkCuda(cudaEventElapsedTime(&temp3, exec, result), "elapsed t3");
			checkCuda(cudaEventElapsedTime(&temptotal, start, result), "elapsed total");
			t1[count] += temp1;
			t2[count] += temp2;
			t3[count] += temp3;
			total[count] += temptotal;

			cudaEventDestroy(start);
			cudaEventDestroy(copy);
			cudaEventDestroy(exec);
			cudaEventDestroy(result);
			count++;
		}
	}

	// Average each accumulator and print one row per block size.
	int threadsPerBlock = min_threads;
	for (int i = 0; i < 7; ++i)
	{
		numBlocks = (N + threadsPerBlock - 1) / threadsPerBlock;
		t1[i] /= (float)ITERATIONS;
		t2[i] /= (float)ITERATIONS;
		t3[i] /= (float)ITERATIONS;
		total[i] /= (float)ITERATIONS;
		printf("%d\t\t%d\t\t%.5f\t\t%.5f\t\t%.5f\t\t%.5f\t\t\n",
			threadsPerBlock, numBlocks, t1[i], t2[i], t3[i], total[i]);
		threadsPerBlock *= 2;
	}

	// Cleanup
	free(a); free(b); free(c);
	cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
	return 0;
}
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z27__device_stub__Z3addPiS_S_iPiS_S_i
.type _Z27__device_stub__Z3addPiS_S_iPiS_S_i, @function
_Z27__device_stub__Z3addPiS_S_iPiS_S_i:
.LFB2082:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3addPiS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z27__device_stub__Z3addPiS_S_iPiS_S_i, .-_Z27__device_stub__Z3addPiS_S_iPiS_S_i
.globl _Z3addPiS_S_i
.type _Z3addPiS_S_i, @function
_Z3addPiS_S_i:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z3addPiS_S_iPiS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z3addPiS_S_i, .-_Z3addPiS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "t1: time for copying arrays\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "t2: time for kernel execution\n"
.align 8
.LC3:
.string "t3: time for copying result back\n\n"
.section .rodata.str1.1
.LC4:
.string "All times in milliseconds\n"
.LC5:
.string "TPB\t\tNB\t\tt1\t\tt2\t\tt3\t\ttotal\t\n"
.section .rodata.str1.8
.align 8
.LC7:
.string "%d\t\t%d\t\t%.5f\t\t%.5f\t\t%.5f\t\t%.5f\t\t\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $264, %rsp
.cfi_def_cfa_offset 320
movq %fs:40, %rax
movq %rax, 248(%rsp)
xorl %eax, %eax
movl $4000000, %edi
call malloc@PLT
movq %rax, %r14
movl $4000000, %edi
call malloc@PLT
movq %rax, %r13
movl $4000000, %edi
call malloc@PLT
movq %rax, %r15
leaq 32(%rsp), %rdi
movl $4000000, %esi
call cudaMalloc@PLT
leaq 40(%rsp), %rdi
movl $4000000, %esi
call cudaMalloc@PLT
leaq 48(%rsp), %rdi
movl $4000000, %esi
call cudaMalloc@PLT
movl $0, %edi
call time@PLT
movl %eax, %edi
call srand@PLT
movl $0, %ebx
.L12:
call rand@PLT
movslq %eax, %rdx
imulq $1717986919, %rdx, %rdx
sarq $35, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
leal (%rdx,%rdx,4), %edx
sall $2, %edx
subl %edx, %eax
movl %eax, (%r14,%rbx)
call rand@PLT
movslq %eax, %rdx
imulq $-580400985, %rdx, %rdx
shrq $32, %rdx
addl %eax, %edx
sarl $5, %edx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
leal (%rdx,%rdx,8), %ecx
leal (%rdx,%rcx,4), %edx
subl %edx, %eax
movl %eax, 0(%r13,%rbx)
addq $4, %rbx
cmpq $4000000, %rbx
jne .L12
movl $0, %eax
.L13:
movl $0x00000000, 112(%rsp,%rax)
movl $0x00000000, 144(%rsp,%rax)
movl $0x00000000, 176(%rsp,%rax)
movl $0x00000000, 208(%rsp,%rax)
addq $4, %rax
cmpq $28, %rax
jne .L13
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $4, 12(%rsp)
leaq 56(%rsp), %rax
movq %rax, (%rsp)
jmp .L14
.L15:
movl $0, %esi
movq 72(%rsp), %rdi
call cudaEventRecord@PLT
movq 72(%rsp), %rdi
call cudaEventSynchronize@PLT
movl $2, %ecx
movl $4000000, %edx
movq 48(%rsp), %rsi
movq %r15, %rdi
call cudaMemcpy@PLT
movl $0, %esi
movq 80(%rsp), %rdi
call cudaEventRecord@PLT
movq 80(%rsp), %rdi
call cudaEventSynchronize@PLT
movl $0x00000000, 24(%rsp)
movl $0x00000000, 28(%rsp)
movl $0x00000000, 88(%rsp)
leaq 24(%rsp), %rdi
movq 64(%rsp), %rdx
movq 56(%rsp), %rsi
call cudaEventElapsedTime@PLT
leaq 28(%rsp), %rdi
movq 72(%rsp), %rdx
movq 64(%rsp), %rsi
call cudaEventElapsedTime@PLT
leaq 88(%rsp), %rdi
movq 80(%rsp), %rdx
movq 72(%rsp), %rsi
call cudaEventElapsedTime@PLT
leaq 100(%rsp), %rdi
movq 80(%rsp), %rdx
movq 56(%rsp), %rsi
call cudaEventElapsedTime@PLT
movss 112(%rsp,%rbx), %xmm0
addss 24(%rsp), %xmm0
movss %xmm0, 112(%rsp,%rbx)
movss 144(%rsp,%rbx), %xmm0
addss 28(%rsp), %xmm0
movss %xmm0, 144(%rsp,%rbx)
movss 176(%rsp,%rbx), %xmm0
addss 88(%rsp), %xmm0
movss %xmm0, 176(%rsp,%rbx)
movss 208(%rsp,%rbx), %xmm0
addss 100(%rsp), %xmm0
movss %xmm0, 208(%rsp,%rbx)
movq 56(%rsp), %rdi
call cudaEventDestroy@PLT
movq 64(%rsp), %rdi
call cudaEventDestroy@PLT
movq 72(%rsp), %rdi
call cudaEventDestroy@PLT
movq 80(%rsp), %rdi
call cudaEventDestroy@PLT
addl %ebp, %ebp
addq $4, %rbx
cmpq $28, %rbx
je .L25
.L16:
leal 999999(%rbp), %eax
cltd
idivl %ebp
movl %eax, %r12d
movq (%rsp), %rdi
call cudaEventCreate@PLT
leaq 64(%rsp), %rdi
call cudaEventCreate@PLT
leaq 72(%rsp), %rdi
call cudaEventCreate@PLT
leaq 80(%rsp), %rdi
call cudaEventCreate@PLT
movl $0, %esi
movq 56(%rsp), %rdi
call cudaEventRecord@PLT
movl $1, %ecx
movl $4000000, %edx
movq %r14, %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $4000000, %edx
movq %r13, %rsi
movq 40(%rsp), %rdi
call cudaMemcpy@PLT
movl $0, %esi
movq 64(%rsp), %rdi
call cudaEventRecord@PLT
movq 64(%rsp), %rdi
call cudaEventSynchronize@PLT
movl %ebp, 100(%rsp)
movl $1, 104(%rsp)
movl $1, 108(%rsp)
movl %r12d, 88(%rsp)
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 100(%rsp), %rdx
movl $1, %ecx
movq 88(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L15
movl %ebp, %ecx
movq 48(%rsp), %rdx
movq 40(%rsp), %rsi
movq 32(%rsp), %rdi
call _Z27__device_stub__Z3addPiS_S_iPiS_S_i
jmp .L15
.L25:
subl $1, 12(%rsp)
je .L19
.L14:
movl $0, %ebx
movl $16, %ebp
jmp .L16
.L19:
movl $0, %ebx
movl $16, %ebp
leaq .LC7(%rip), %r12
.L17:
movss .LC6(%rip), %xmm0
mulss 112(%rsp,%rbx), %xmm0
movss %xmm0, 112(%rsp,%rbx)
movss .LC6(%rip), %xmm1
mulss 144(%rsp,%rbx), %xmm1
movss %xmm1, 144(%rsp,%rbx)
movss .LC6(%rip), %xmm2
mulss 176(%rsp,%rbx), %xmm2
movss %xmm2, 176(%rsp,%rbx)
movss .LC6(%rip), %xmm3
mulss 208(%rsp,%rbx), %xmm3
movss %xmm3, 208(%rsp,%rbx)
cvtss2sd %xmm0, %xmm0
leal 999999(%rbp), %eax
cltd
idivl %ebp
cvtss2sd %xmm3, %xmm3
cvtss2sd %xmm2, %xmm2
cvtss2sd %xmm1, %xmm1
movl %eax, %ecx
movl %ebp, %edx
movq %r12, %rsi
movl $2, %edi
movl $4, %eax
call __printf_chk@PLT
addl %ebp, %ebp
addq $4, %rbx
cmpq $28, %rbx
jne .L17
movq %r14, %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
movq %r15, %rdi
call free@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 48(%rsp), %rdi
call cudaFree@PLT
movq 248(%rsp), %rax
subq %fs:40, %rax
jne .L26
movl $0, %eax
addq $264, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L26:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC8:
.string "_Z3addPiS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _Z3addPiS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC6:
.long 1048576000
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.