system
stringclasses
7 values
input
stringlengths
10
94.5k
output
stringlengths
14
55.5k
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <stdio.h>
#include <stdlib.h>

#define N 10 // ( N )x( N ) matrix containing data

// Abort with a readable message when a CUDA API call fails.  Kernel launches
// return nothing themselves, so the launch site additionally checks
// cudaGetLastError() / cudaDeviceSynchronize().
#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        cudaError_t err_ = (call);                                            \
        if (err_ != cudaSuccess) {                                            \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,     \
                    cudaGetErrorString(err_));                                \
            exit(EXIT_FAILURE);                                               \
        }                                                                     \
    } while (0)

// The idea with an aligned array is that the GPU will perform better if you
// pad its data array so that it can fit better in cache.  CUDA accomplishes
// this with the cudaMallocPitch() call.  pitch (of type size_t) is the number
// of bytes per row in the array on the device.  This is equivalent to
// sizeof(arraytype)*(columns + paddingColumns) OR
// sizeof(arraytype)*numDeviceColumns
//
// This does mean that care must be taken when copying data to and from the
// device because the array is no longer completely linear (it has padding).
// CUDA offers a convenience function called cudaMemcpy2D() which allows you
// to specify the array rows/cols as well as the pitch for the source and the
// destination.

// Copy one element of pitched array A into pitched array B.
//   devWidth - padded row width in ELEMENTS (d_pitch / sizeof(float))
//   nx, ny   - dimensions of the valid data region
// The guard is required because the launch rounds the grid up to whole
// 16x16 blocks, so some threads fall outside the data; in particular rows
// j >= ny would store past the end of the pitched allocation.
__global__ void kernel(float *A, float *B, int devWidth, int nx, int ny) {
    int i = blockDim.x * blockIdx.x + threadIdx.x; // column
    int j = blockDim.y * blockIdx.y + threadIdx.y; // row
    if (i >= nx || j >= ny) return;                // skip out-of-range threads
    int o = i + devWidth * j;                      // offset in padded layout
    B[o] = A[o];
    //B[o] = o; // uncomment to print the indexes
}

// Print an ny-row by nx-column, tightly packed, row-major matrix,
// one row per line, followed by a blank line.
void print(float *A, int nx, int ny) {
    for (int j = 0; j < ny; j++) {
        for (int i = 0; i < nx; i++) {
            printf("%4.0f ", A[i + nx * j]);
        }
        printf("\n");
    }
    printf("\n");
}

int main(int argc, char *argv[]) {
    // Allocate on host (unpadded, N*N contiguous floats).
    float *h_A, *h_B;
    float *d_A, *d_B;
    h_A = (float*)malloc(sizeof(float) * N * N);
    h_B = (float*)malloc(sizeof(float) * N * N);
    if (h_A == NULL || h_B == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return EXIT_FAILURE;
    }
    size_t h_pitch = sizeof(float) * N; // host rows are tightly packed

    // Initialize Array h_A
    for (int i = 0; i < N * N; i++) h_A[i] = i;
    //h_A[i]=0; <-- use to print the padded indexes

    // Allocate pitched memory for padded arrays d_A and d_B.
    size_t d_pitch; // bytes per row in the device array (incl. padding)
    CUDA_CHECK(cudaMallocPitch((void**)&d_A, &d_pitch, sizeof(float) * N, N));
    CUDA_CHECK(cudaMallocPitch((void**)&d_B, &d_pitch, sizeof(float) * N, N));
    int devWidth = d_pitch / sizeof(float); // padded row width in elements

    // Copy memory from unpadded host array A to padded device array.
    CUDA_CHECK(cudaMemcpy2D(d_A, d_pitch, h_A, h_pitch,
                            sizeof(float) * N, N, cudaMemcpyHostToDevice));

    dim3 threads = dim3(16, 16);
    dim3 blocks = dim3((N + 16 - 1) / 16, (N + 16 - 1) / 16); // ceil-div
    kernel<<<blocks, threads>>>(d_A, d_B, devWidth, N, N);
    CUDA_CHECK(cudaGetLastError());      // catch launch-configuration errors
    CUDA_CHECK(cudaDeviceSynchronize()); // catch in-kernel faults

    // Copy memory from padded device array B back to unpadded host array.
    CUDA_CHECK(cudaMemcpy2D(h_B, h_pitch, d_B, d_pitch,
                            sizeof(float) * N, N, cudaMemcpyDeviceToHost));

    // Are they the same?
    print(h_A, N, N);
    print(h_B, N, N);

    // Release Arrays
    CUDA_CHECK(cudaFree(d_A));
    CUDA_CHECK(cudaFree(d_B));
    free(h_A);
    free(h_B);
    return 0;
}
.file "tmpxft_00105325_00000000-6_AlingMemoryExample.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2061: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2061: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "%4.0f " .LC1: .string "\n" .text .globl _Z5printPfii .type _Z5printPfii, @function _Z5printPfii: .LFB2057: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $40, %rsp .cfi_def_cfa_offset 96 movq %rdi, 16(%rsp) movl %edx, 12(%rsp) testl %edx, %edx jle .L4 movl %esi, %r15d movl $0, %r14d movl $0, %r13d movslq %esi, %rax movq %rax, 24(%rsp) leaq .LC0(%rip), %r12 jmp .L5 .L7: movslq %r14d, %rax movq 16(%rsp), %rcx leaq (%rcx,%rax,4), %rbx movq 24(%rsp), %rdx addq %rdx, %rax leaq (%rcx,%rax,4), %rbp .L6: pxor %xmm0, %xmm0 cvtss2sd (%rbx), %xmm0 movq %r12, %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT addq $4, %rbx cmpq %rbp, %rbx jne .L6 .L8: leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addl $1, %r13d addl %r15d, %r14d cmpl %r13d, 12(%rsp) je .L4 .L5: testl %r15d, %r15d jg .L7 jmp .L8 .L4: leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addq $40, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2057: .size _Z5printPfii, 
.-_Z5printPfii .globl _Z28__device_stub__Z6kernelPfS_iPfS_i .type _Z28__device_stub__Z6kernelPfS_iPfS_i, @function _Z28__device_stub__Z6kernelPfS_iPfS_i: .LFB2083: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L15 .L11: movq 120(%rsp), %rax subq %fs:40, %rax jne .L16 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z6kernelPfS_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L11 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE2083: .size _Z28__device_stub__Z6kernelPfS_iPfS_i, .-_Z28__device_stub__Z6kernelPfS_iPfS_i .globl _Z6kernelPfS_i .type _Z6kernelPfS_i, @function _Z6kernelPfS_i: .LFB2084: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z28__device_stub__Z6kernelPfS_iPfS_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2084: .size _Z6kernelPfS_i, .-_Z6kernelPfS_i .globl main .type main, @function main: .LFB2058: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $64, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 56(%rsp) xorl %eax, %eax movl $400, %edi call malloc@PLT movq %rax, %rbx movl $400, %edi call malloc@PLT movq %rax, %rbp movl $0, %eax 
.L20: pxor %xmm0, %xmm0 cvtsi2ssl %eax, %xmm0 movss %xmm0, (%rbx,%rax,4) addq $1, %rax cmpq $100, %rax jne .L20 leaq 24(%rsp), %r12 leaq 8(%rsp), %rdi movl $10, %ecx movl $40, %edx movq %r12, %rsi call cudaMallocPitch@PLT leaq 16(%rsp), %rdi movl $10, %ecx movl $40, %edx movq %r12, %rsi call cudaMallocPitch@PLT movq 24(%rsp), %r12 subq $8, %rsp .cfi_def_cfa_offset 104 pushq $1 .cfi_def_cfa_offset 112 movl $10, %r9d movl $40, %r8d movl $40, %ecx movq %rbx, %rdx movq %r12, %rsi movq 24(%rsp), %rdi call cudaMemcpy2D@PLT movl $16, 48(%rsp) movl $16, 52(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) addq $16, %rsp .cfi_def_cfa_offset 96 movl $0, %r9d movl $0, %r8d movq 32(%rsp), %rdx movl $1, %ecx movq 44(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L25 .L21: subq $8, %rsp .cfi_def_cfa_offset 104 pushq $2 .cfi_def_cfa_offset 112 movl $10, %r9d movl $40, %r8d movq 40(%rsp), %rcx movq 32(%rsp), %rdx movl $40, %esi movq %rbp, %rdi call cudaMemcpy2D@PLT addq $16, %rsp .cfi_def_cfa_offset 96 movl $10, %edx movl $10, %esi movq %rbx, %rdi call _Z5printPfii movl $10, %edx movl $10, %esi movq %rbp, %rdi call _Z5printPfii movq 8(%rsp), %rdi call cudaFree@PLT movq 16(%rsp), %rdi call cudaFree@PLT movq %rbx, %rdi call free@PLT movq %rbp, %rdi call free@PLT movq 56(%rsp), %rax subq %fs:40, %rax jne .L26 movl $0, %eax addq $64, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L25: .cfi_restore_state movq %r12, %rdx shrq $2, %rdx movq 16(%rsp), %rsi movq 8(%rsp), %rdi call _Z28__device_stub__Z6kernelPfS_iPfS_i jmp .L21 .L26: call __stack_chk_fail@PLT .cfi_endproc .LFE2058: .size main, .-main .section .rodata.str1.1 .LC2: .string "_Z6kernelPfS_i" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2086: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call 
__cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC2(%rip), %rdx movq %rdx, %rcx leaq _Z6kernelPfS_i(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2086: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <stdio.h>
#include <stdlib.h>

#define N 10 // ( N )x( N ) matrix containing data

// The idea with an aligned array is that the GPU will perform better if you
// pad its data array so that it can fit better in cache.  CUDA accomplishes
// this with the cudaMallocPitch() call.  pitch (of type size_t) is the number
// of bytes per row in the array on the device.  This is equivalent to
// sizeof(arraytype)*(columns + paddingColumns) OR
// sizeof(arraytype)*numDeviceColumns
//
// This does mean that care must be taken when copying data to and from the
// device because the array is no longer completely linear (it has padding).
// CUDA offers a convenience function called cudaMemcpy2D() which allows you
// to specify the array rows/cols as well as the pitch for the source and the
// destination.

// Copy each element of pitched array A into pitched array B.  devWidth is
// the padded row width in ELEMENTS (d_pitch / sizeof(float)), so `o` indexes
// into the padded layout, not the tight N-wide layout.
// NOTE(review): there is no bounds check.  The launch below rounds the grid
// up to one full 16x16 block, so threads with j in [N, 15] compute offsets
// past the pitched allocation (pitch * N bytes) — out-of-bounds accesses.
// A guard such as `if (i < N && j < N)` appears to be needed; confirm
// intended behavior before relying on this code.
__global__ void kernel(float *A, float *B, int devWidth) {
    int i = blockDim.x * blockIdx.x + threadIdx.x; // column
    int j = blockDim.y * blockIdx.y + threadIdx.y; // row
    int o = i+devWidth*j; // linear offset in the padded array
    B[o] = A[o];
    //B[o] = o; // uncomment to print the indexes
}

// Print an ny-row by nx-column, tightly packed, row-major matrix,
// one row per line, followed by a blank line.
void print(float *A, int nx, int ny) {
    for (int j = 0; j < ny; j++) {
        for (int i = 0; i < nx; i++) {
            printf("%4.0f ",A[i+nx*j]);
        }
        printf("\n");
    }
    printf("\n");
}

int main(int argc, char * argv[]) {
    // Allocate on host (unpadded: N*N contiguous floats).
    // NOTE(review): malloc results are not checked for NULL, and no CUDA API
    // return codes are checked anywhere below.
    float *h_A, *h_B;
    float *d_A, *d_B;
    h_A = (float*)malloc(sizeof(float)*N*N);
    h_B = (float*)malloc(sizeof(float)*N*N);
    size_t h_pitch=sizeof(float)*N; // host rows are tightly packed

    // Initialize Array h_A
    for (int i = 0; i < N*N; i++) h_A[i]=i;
    //h_A[i]=0; <-- use to print the padded indexes

    // Allocate pitched memory for padded arrays d_A and d_B.
    // Both calls overwrite d_pitch; this assumes both allocations get the
    // same pitch (same width/height/device, so that holds in practice).
    size_t d_pitch; // bytes per padded row in the device array
    cudaMallocPitch((void**)&d_A,&d_pitch,sizeof(float)*N,N);
    cudaMallocPitch((void**)&d_B,&d_pitch,sizeof(float)*N,N);
    int devWidth = d_pitch/sizeof(float); // padded row width in elements

    // Copy memory from unpadded array A to padded array B
    cudaMemcpy2D(d_A,d_pitch,h_A,h_pitch,sizeof(float)*N,N,cudaMemcpyHostToDevice);

    // One 16x16 block covers the whole 10x10 matrix (grid is 1x1 here).
    dim3 threads = dim3(16,16);
    dim3 blocks = dim3((N+16-1)/16,(N+16-1)/16); // ceiling division
    kernel<<<blocks,threads>>>(d_A,d_B,devWidth);

    // Copy memory from padded array B to unpadded array A
    // (cudaMemcpy2D blocks, so no explicit synchronization is needed first).
    cudaMemcpy2D(h_B,h_pitch,d_B,d_pitch,sizeof(float)*N,N,cudaMemcpyDeviceToHost);

    // Are they the same?
    print(h_A,N,N);
    print(h_B,N,N);

    // Release Arrays
    cudaFree(d_A);
    cudaFree(d_B);
    free(h_A);
    free(h_B);
}
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>

#define N 10 // ( N )x( N ) matrix containing data

// Abort with a readable message when a HIP API call fails.  Kernel launches
// return nothing themselves, so the launch site additionally checks
// hipGetLastError() / hipDeviceSynchronize().
#define HIP_CHECK(call)                                                       \
    do {                                                                      \
        hipError_t err_ = (call);                                             \
        if (err_ != hipSuccess) {                                             \
            fprintf(stderr, "HIP error %s:%d: %s\n", __FILE__, __LINE__,      \
                    hipGetErrorString(err_));                                 \
            exit(EXIT_FAILURE);                                               \
        }                                                                     \
    } while (0)

// The idea with an aligned array is that the GPU will perform better if you
// pad its data array so that it can fit better in cache.  HIP accomplishes
// this with the hipMallocPitch() call.  pitch (of type size_t) is the number
// of bytes per row in the array on the device.  This is equivalent to
// sizeof(arraytype)*(columns + paddingColumns) OR
// sizeof(arraytype)*numDeviceColumns
//
// This does mean that care must be taken when copying data to and from the
// device because the array is no longer completely linear (it has padding).
// HIP offers a convenience function called hipMemcpy2D() which allows you
// to specify the array rows/cols as well as the pitch for the source and the
// destination.

// Copy one element of pitched array A into pitched array B.
//   devWidth - padded row width in ELEMENTS (d_pitch / sizeof(float))
//   nx, ny   - dimensions of the valid data region
// The guard is required because the launch rounds the grid up to whole
// 16x16 blocks, so some threads fall outside the data; in particular rows
// j >= ny would store past the end of the pitched allocation.
__global__ void kernel(float *A, float *B, int devWidth, int nx, int ny) {
    int i = blockDim.x * blockIdx.x + threadIdx.x; // column
    int j = blockDim.y * blockIdx.y + threadIdx.y; // row
    if (i >= nx || j >= ny) return;                // skip out-of-range threads
    int o = i + devWidth * j;                      // offset in padded layout
    B[o] = A[o];
    //B[o] = o; // uncomment to print the indexes
}

// Print an ny-row by nx-column, tightly packed, row-major matrix,
// one row per line, followed by a blank line.
void print(float *A, int nx, int ny) {
    for (int j = 0; j < ny; j++) {
        for (int i = 0; i < nx; i++) {
            printf("%4.0f ", A[i + nx * j]);
        }
        printf("\n");
    }
    printf("\n");
}

int main(int argc, char *argv[]) {
    // Allocate on host (unpadded, N*N contiguous floats).
    float *h_A, *h_B;
    float *d_A, *d_B;
    h_A = (float*)malloc(sizeof(float) * N * N);
    h_B = (float*)malloc(sizeof(float) * N * N);
    if (h_A == NULL || h_B == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return EXIT_FAILURE;
    }
    size_t h_pitch = sizeof(float) * N; // host rows are tightly packed

    // Initialize Array h_A
    for (int i = 0; i < N * N; i++) h_A[i] = i;
    //h_A[i]=0; <-- use to print the padded indexes

    // Allocate pitched memory for padded arrays d_A and d_B.
    size_t d_pitch; // bytes per row in the device array (incl. padding)
    HIP_CHECK(hipMallocPitch((void**)&d_A, &d_pitch, sizeof(float) * N, N));
    HIP_CHECK(hipMallocPitch((void**)&d_B, &d_pitch, sizeof(float) * N, N));
    int devWidth = d_pitch / sizeof(float); // padded row width in elements

    // Copy memory from unpadded host array A to padded device array.
    HIP_CHECK(hipMemcpy2D(d_A, d_pitch, h_A, h_pitch,
                          sizeof(float) * N, N, hipMemcpyHostToDevice));

    dim3 threads = dim3(16, 16);
    dim3 blocks = dim3((N + 16 - 1) / 16, (N + 16 - 1) / 16); // ceil-div
    kernel<<<blocks, threads>>>(d_A, d_B, devWidth, N, N);
    HIP_CHECK(hipGetLastError());      // catch launch-configuration errors
    HIP_CHECK(hipDeviceSynchronize()); // catch in-kernel faults

    // Copy memory from padded device array B back to unpadded host array.
    HIP_CHECK(hipMemcpy2D(h_B, h_pitch, d_B, d_pitch,
                          sizeof(float) * N, N, hipMemcpyDeviceToHost));

    // Are they the same?
    print(h_A, N, N);
    print(h_B, N, N);

    // Release Arrays
    HIP_CHECK(hipFree(d_A));
    HIP_CHECK(hipFree(d_B));
    free(h_A);
    free(h_B);
    return 0;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>

#define N 10 // ( N )x( N ) matrix containing data

// The idea with an aligned array is that the GPU will perform better if you
// pad its data array so that it can fit better in cache.  The runtime
// accomplishes this with the hipMallocPitch() call.  pitch (of type size_t)
// is the number of bytes per row in the array on the device.  This is
// equivalent to sizeof(arraytype)*(columns + paddingColumns) OR
// sizeof(arraytype)*numDeviceColumns
//
// This does mean that care must be taken when copying data to and from the
// device because the array is no longer completely linear (it has padding).
// hipMemcpy2D() is a convenience function which allows you to specify the
// array rows/cols as well as the pitch for the source and the destination.

// Copy each element of pitched array A into pitched array B.  devWidth is
// the padded row width in ELEMENTS (d_pitch / sizeof(float)), so `o` indexes
// into the padded layout, not the tight N-wide layout.
// NOTE(review): there is no bounds check.  The launch below rounds the grid
// up to one full 16x16 block, so threads with j in [N, 15] compute offsets
// past the pitched allocation (pitch * N bytes) — out-of-bounds accesses.
// A guard such as `if (i < N && j < N)` appears to be needed; confirm
// intended behavior before relying on this code.
__global__ void kernel(float *A, float *B, int devWidth) {
    int i = blockDim.x * blockIdx.x + threadIdx.x; // column
    int j = blockDim.y * blockIdx.y + threadIdx.y; // row
    int o = i+devWidth*j; // linear offset in the padded array
    B[o] = A[o];
    //B[o] = o; // uncomment to print the indexes
}

// Print an ny-row by nx-column, tightly packed, row-major matrix,
// one row per line, followed by a blank line.
void print(float *A, int nx, int ny) {
    for (int j = 0; j < ny; j++) {
        for (int i = 0; i < nx; i++) {
            printf("%4.0f ",A[i+nx*j]);
        }
        printf("\n");
    }
    printf("\n");
}

int main(int argc, char * argv[]) {
    // Allocate on host (unpadded: N*N contiguous floats).
    // NOTE(review): malloc results are not checked for NULL, and no HIP API
    // return codes are checked anywhere below.
    float *h_A, *h_B;
    float *d_A, *d_B;
    h_A = (float*)malloc(sizeof(float)*N*N);
    h_B = (float*)malloc(sizeof(float)*N*N);
    size_t h_pitch=sizeof(float)*N; // host rows are tightly packed

    // Initialize Array h_A
    for (int i = 0; i < N*N; i++) h_A[i]=i;
    //h_A[i]=0; <-- use to print the padded indexes

    // Allocate pitched memory for padded arrays d_A and d_B.
    // Both calls overwrite d_pitch; this assumes both allocations get the
    // same pitch (same width/height/device, so that holds in practice).
    size_t d_pitch; // bytes per padded row in the device array
    hipMallocPitch((void**)&d_A,&d_pitch,sizeof(float)*N,N);
    hipMallocPitch((void**)&d_B,&d_pitch,sizeof(float)*N,N);
    int devWidth = d_pitch/sizeof(float); // padded row width in elements

    // Copy memory from unpadded array A to padded array B
    hipMemcpy2D(d_A,d_pitch,h_A,h_pitch,sizeof(float)*N,N,hipMemcpyHostToDevice);

    // One 16x16 block covers the whole 10x10 matrix (grid is 1x1 here).
    dim3 threads = dim3(16,16);
    dim3 blocks = dim3((N+16-1)/16,(N+16-1)/16); // ceiling division
    kernel<<<blocks,threads>>>(d_A,d_B,devWidth);

    // Copy memory from padded array B to unpadded array A
    // (hipMemcpy2D blocks, so no explicit synchronization is needed first).
    hipMemcpy2D(h_B,h_pitch,d_B,d_pitch,sizeof(float)*N,N,hipMemcpyDeviceToHost);

    // Are they the same?
    print(h_A,N,N);
    print(h_B,N,N);

    // Release Arrays
    hipFree(d_A);
    hipFree(d_B);
    free(h_A);
    free(h_B);
}
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z6kernelPfS_i .globl _Z6kernelPfS_i .p2align 8 .type _Z6kernelPfS_i,@function _Z6kernelPfS_i: s_clause 0x1 s_load_b32 s4, s[0:1], 0x24 s_load_b32 s2, s[0:1], 0x10 v_bfe_u32 v1, v0, 10, 10 v_and_b32_e32 v0, 0x3ff, v0 s_waitcnt lgkmcnt(0) s_lshr_b32 s3, s4, 16 s_and_b32 s4, s4, 0xffff v_mad_u64_u32 v[2:3], null, s15, s3, v[1:2] s_mul_i32 s14, s14, s4 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mul_lo_u32 v1, v2, s2 s_load_b128 s[0:3], s[0:1], 0x0 v_add3_u32 v0, s14, v0, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v1, 31, v0 v_lshlrev_b64 v[0:1], 2, v[0:1] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v2, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v3, vcc_lo, s1, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s2, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo global_load_b32 v2, v[2:3], off s_waitcnt vmcnt(0) global_store_b32 v[0:1], v2, off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z6kernelPfS_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 4 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 
.amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z6kernelPfS_i, .Lfunc_end0-_Z6kernelPfS_i .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 
.max_flat_workgroup_size: 1024 .name: _Z6kernelPfS_i .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z6kernelPfS_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 4 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>

#define N 10 // ( N )x( N ) matrix containing data

// The idea with an aligned array is that the GPU will perform better if you
// pad its data array so that it can fit better in cache.  The runtime
// accomplishes this with the hipMallocPitch() call.  pitch (of type size_t)
// is the number of bytes per row in the array on the device.  This is
// equivalent to sizeof(arraytype)*(columns + paddingColumns) OR
// sizeof(arraytype)*numDeviceColumns
//
// This does mean that care must be taken when copying data to and from the
// device because the array is no longer completely linear (it has padding).
// hipMemcpy2D() is a convenience function which allows you to specify the
// array rows/cols as well as the pitch for the source and the destination.

// Copy each element of pitched array A into pitched array B.  devWidth is
// the padded row width in ELEMENTS (d_pitch / sizeof(float)), so `o` indexes
// into the padded layout, not the tight N-wide layout.
// NOTE(review): there is no bounds check.  The launch below rounds the grid
// up to one full 16x16 block, so threads with j in [N, 15] compute offsets
// past the pitched allocation (pitch * N bytes) — out-of-bounds accesses.
// A guard such as `if (i < N && j < N)` appears to be needed; confirm
// intended behavior before relying on this code.
__global__ void kernel(float *A, float *B, int devWidth) {
    int i = blockDim.x * blockIdx.x + threadIdx.x; // column
    int j = blockDim.y * blockIdx.y + threadIdx.y; // row
    int o = i+devWidth*j; // linear offset in the padded array
    B[o] = A[o];
    //B[o] = o; // uncomment to print the indexes
}

// Print an ny-row by nx-column, tightly packed, row-major matrix,
// one row per line, followed by a blank line.
void print(float *A, int nx, int ny) {
    for (int j = 0; j < ny; j++) {
        for (int i = 0; i < nx; i++) {
            printf("%4.0f ",A[i+nx*j]);
        }
        printf("\n");
    }
    printf("\n");
}

int main(int argc, char * argv[]) {
    // Allocate on host (unpadded: N*N contiguous floats).
    // NOTE(review): malloc results are not checked for NULL, and no HIP API
    // return codes are checked anywhere below.
    float *h_A, *h_B;
    float *d_A, *d_B;
    h_A = (float*)malloc(sizeof(float)*N*N);
    h_B = (float*)malloc(sizeof(float)*N*N);
    size_t h_pitch=sizeof(float)*N; // host rows are tightly packed

    // Initialize Array h_A
    for (int i = 0; i < N*N; i++) h_A[i]=i;
    //h_A[i]=0; <-- use to print the padded indexes

    // Allocate pitched memory for padded arrays d_A and d_B.
    // Both calls overwrite d_pitch; this assumes both allocations get the
    // same pitch (same width/height/device, so that holds in practice).
    size_t d_pitch; // bytes per padded row in the device array
    hipMallocPitch((void**)&d_A,&d_pitch,sizeof(float)*N,N);
    hipMallocPitch((void**)&d_B,&d_pitch,sizeof(float)*N,N);
    int devWidth = d_pitch/sizeof(float); // padded row width in elements

    // Copy memory from unpadded array A to padded array B
    hipMemcpy2D(d_A,d_pitch,h_A,h_pitch,sizeof(float)*N,N,hipMemcpyHostToDevice);

    // One 16x16 block covers the whole 10x10 matrix (grid is 1x1 here).
    dim3 threads = dim3(16,16);
    dim3 blocks = dim3((N+16-1)/16,(N+16-1)/16); // ceiling division
    kernel<<<blocks,threads>>>(d_A,d_B,devWidth);

    // Copy memory from padded array B to unpadded array A
    // (hipMemcpy2D blocks, so no explicit synchronization is needed first).
    hipMemcpy2D(h_B,h_pitch,d_B,d_pitch,sizeof(float)*N,N,hipMemcpyDeviceToHost);

    // Are they the same?
    print(h_A,N,N);
    print(h_B,N,N);

    // Release Arrays
    hipFree(d_A);
    hipFree(d_B);
    free(h_A);
    free(h_B);
}
.text .file "AlingMemoryExample.hip" .globl _Z21__device_stub__kernelPfS_i # -- Begin function _Z21__device_stub__kernelPfS_i .p2align 4, 0x90 .type _Z21__device_stub__kernelPfS_i,@function _Z21__device_stub__kernelPfS_i: # @_Z21__device_stub__kernelPfS_i .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z6kernelPfS_i, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z21__device_stub__kernelPfS_i, .Lfunc_end0-_Z21__device_stub__kernelPfS_i .cfi_endproc # -- End function .globl _Z5printPfii # -- Begin function _Z5printPfii .p2align 4, 0x90 .type _Z5printPfii,@function _Z5printPfii: # @_Z5printPfii .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $24, %rsp .cfi_def_cfa_offset 80 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movq %rdi, 8(%rsp) # 8-byte Spill testl %edx, %edx jle .LBB1_6 # %bb.1: # %.preheader.lr.ph movl %esi, %ebx movl %edx, %eax movq %rax, 16(%rsp) # 8-byte Spill movl %esi, %r12d xorl %r13d, %r13d xorl %ebp, %ebp jmp .LBB1_2 .p2align 4, 0x90 .LBB1_5: # %._crit_edge # in Loop: Header=BB1_2 Depth=1 movl $10, %edi callq putchar@PLT incq %rbp addl %ebx, %r13d cmpq 16(%rsp), %rbp # 8-byte Folded Reload je .LBB1_6 .LBB1_2: # %.preheader # =>This Loop Header: 
Depth=1 # Child Loop BB1_4 Depth 2 testl %ebx, %ebx jle .LBB1_5 # %bb.3: # %.lr.ph # in Loop: Header=BB1_2 Depth=1 movl %r13d, %eax movq 8(%rsp), %rcx # 8-byte Reload leaq (%rcx,%rax,4), %r14 xorl %r15d, %r15d .p2align 4, 0x90 .LBB1_4: # Parent Loop BB1_2 Depth=1 # => This Inner Loop Header: Depth=2 movss (%r14,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str, %edi movb $1, %al callq printf incq %r15 cmpq %r15, %r12 jne .LBB1_4 jmp .LBB1_5 .LBB1_6: # %._crit_edge14 movl $10, %edi addq $24, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 jmp putchar@PLT # TAILCALL .Lfunc_end1: .size _Z5printPfii, .Lfunc_end1-_Z5printPfii .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r13 .cfi_def_cfa_offset 32 pushq %r12 .cfi_def_cfa_offset 40 pushq %rbx .cfi_def_cfa_offset 48 subq $144, %rsp .cfi_def_cfa_offset 192 .cfi_offset %rbx, -48 .cfi_offset %r12, -40 .cfi_offset %r13, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movl $400, %edi # imm = 0x190 callq malloc movq %rax, %rbx movl $400, %edi # imm = 0x190 callq malloc movq %rax, %r14 xorl %eax, %eax .p2align 4, 0x90 .LBB2_1: # =>This Inner Loop Header: Depth=1 xorps %xmm0, %xmm0 cvtsi2ss %eax, %xmm0 movss %xmm0, (%rbx,%rax,4) incq %rax cmpq $100, %rax jne .LBB2_1 # %bb.2: leaq 24(%rsp), %rdi leaq 40(%rsp), %r15 movl $40, %edx movl $10, %ecx movq %r15, %rsi callq hipMallocPitch leaq 16(%rsp), %rdi movl $40, %edx movl $10, %ecx movq %r15, %rsi callq hipMallocPitch movq 40(%rsp), %r15 movq 24(%rsp), %rdi movl $1, (%rsp) movl $40, %ecx movl $40, %r8d movl $10, %r9d movq %r15, %rsi movq %rbx, %rdx callq hipMemcpy2D movabsq $4294967297, %rdi # imm = 
0x100000001 movabsq $68719476752, %rdx # imm = 0x1000000010 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB2_4 # %bb.3: shrq $2, %r15 movq 24(%rsp), %rax movq 16(%rsp), %rcx movq %rax, 104(%rsp) movq %rcx, 96(%rsp) movl %r15d, 36(%rsp) leaq 104(%rsp), %rax movq %rax, 112(%rsp) leaq 96(%rsp), %rax movq %rax, 120(%rsp) leaq 36(%rsp), %rax movq %rax, 128(%rsp) leaq 80(%rsp), %rdi leaq 64(%rsp), %rsi leaq 56(%rsp), %rdx leaq 48(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rax movq 48(%rsp), %rdi movq 80(%rsp), %rsi movl 88(%rsp), %edx movq 64(%rsp), %rcx movl 72(%rsp), %r8d movq %rdi, 8(%rsp) movq %rax, (%rsp) leaq 112(%rsp), %r9 movl $_Z6kernelPfS_i, %edi callq hipLaunchKernel .LBB2_4: movq 16(%rsp), %rdx movq 40(%rsp), %rcx movl $2, (%rsp) movl $40, %esi movl $40, %r8d movl $10, %r9d movq %r14, %rdi callq hipMemcpy2D xorl %r15d, %r15d movq %rbx, %r12 .p2align 4, 0x90 .LBB2_5: # %.preheader.i # =>This Loop Header: Depth=1 # Child Loop BB2_6 Depth 2 xorl %r13d, %r13d .p2align 4, 0x90 .LBB2_6: # Parent Loop BB2_5 Depth=1 # => This Inner Loop Header: Depth=2 movss (%r12,%r13,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str, %edi movb $1, %al callq printf incq %r13 cmpq $10, %r13 jne .LBB2_6 # %bb.7: # %._crit_edge.i # in Loop: Header=BB2_5 Depth=1 movl $10, %edi callq putchar@PLT incq %r15 addq $40, %r12 cmpq $10, %r15 jne .LBB2_5 # %bb.8: # %_Z5printPfii.exit movl $10, %edi callq putchar@PLT xorl %r15d, %r15d movq %r14, %r12 .p2align 4, 0x90 .LBB2_9: # %.preheader.i20 # =>This Loop Header: Depth=1 # Child Loop BB2_10 Depth 2 xorl %r13d, %r13d .p2align 4, 0x90 .LBB2_10: # Parent Loop BB2_9 Depth=1 # => This Inner Loop Header: Depth=2 movss (%r12,%r13,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str, %edi movb $1, %al callq printf incq %r13 cmpq $10, %r13 jne .LBB2_10 # %bb.11: # %._crit_edge.i25 # in Loop: Header=BB2_9 Depth=1 movl 
$10, %edi callq putchar@PLT incq %r15 addq $40, %r12 cmpq $10, %r15 jne .LBB2_9 # %bb.12: # %_Z5printPfii.exit30 movl $10, %edi callq putchar@PLT movq 24(%rsp), %rdi callq hipFree movq 16(%rsp), %rdi callq hipFree movq %rbx, %rdi callq free movq %r14, %rdi callq free xorl %eax, %eax addq $144, %rsp .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .Lfunc_end2: .size main, .Lfunc_end2-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB3_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB3_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z6kernelPfS_i, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end3: .size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB4_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB4_2: retq .Lfunc_end4: .size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor .cfi_endproc # -- End function .type _Z6kernelPfS_i,@object # @_Z6kernelPfS_i .section .rodata,"a",@progbits .globl _Z6kernelPfS_i .p2align 3, 0x0 _Z6kernelPfS_i: .quad _Z21__device_stub__kernelPfS_i .size 
_Z6kernelPfS_i, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "%4.0f " .size .L.str, 7 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z6kernelPfS_i" .size .L__unnamed_1, 15 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z21__device_stub__kernelPfS_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z6kernelPfS_i .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z6kernelPfS_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e220000002500 */ /*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc60000000a00 */ /*0030*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e280000002100 */ /*0040*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */ /* 0x000e680000002600 */ /*0050*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */ /* 0x000e620000002200 */ /*0060*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x001fe400078e0203 */ /*0070*/ IMAD R3, R2, c[0x0][0x4], R5 ; /* 0x0000010002037a24 */ /* 0x002fe200078e0205 */ /*0080*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */ /* 0x000fc600000001ff */ /*0090*/ IMAD R0, R3, c[0x0][0x170], R0 ; /* 0x00005c0003007a24 */ /* 0x000fce00078e0200 */ /*00a0*/ IMAD.WIDE R2, R0, R5, c[0x0][0x160] ; /* 0x0000580000027625 */ /* 0x000fcc00078e0205 */ /*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea2000c1e1900 */ /*00c0*/ IMAD.WIDE R4, R0, R5, c[0x0][0x168] ; /* 0x00005a0000047625 */ /* 0x000fca00078e0205 */ /*00d0*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */ /* 0x004fe2000c101904 */ /*00e0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*00f0*/ BRA 0xf0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0100*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 
0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z6kernelPfS_i .globl _Z6kernelPfS_i .p2align 8 .type _Z6kernelPfS_i,@function _Z6kernelPfS_i: s_clause 0x1 s_load_b32 s4, s[0:1], 0x24 s_load_b32 s2, s[0:1], 0x10 v_bfe_u32 v1, v0, 10, 10 v_and_b32_e32 v0, 0x3ff, v0 s_waitcnt lgkmcnt(0) s_lshr_b32 s3, s4, 16 s_and_b32 s4, s4, 0xffff v_mad_u64_u32 v[2:3], null, s15, s3, v[1:2] s_mul_i32 s14, s14, s4 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mul_lo_u32 v1, v2, s2 s_load_b128 s[0:3], s[0:1], 0x0 v_add3_u32 v0, s14, v0, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v1, 31, v0 v_lshlrev_b64 v[0:1], 2, v[0:1] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v2, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v3, vcc_lo, s1, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s2, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo global_load_b32 v2, v[2:3], off s_waitcnt vmcnt(0) global_store_b32 v[0:1], v2, off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z6kernelPfS_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 4 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 
.amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z6kernelPfS_i, .Lfunc_end0-_Z6kernelPfS_i .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 
.max_flat_workgroup_size: 1024 .name: _Z6kernelPfS_i .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z6kernelPfS_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 4 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00105325_00000000-6_AlingMemoryExample.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2061: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2061: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "%4.0f " .LC1: .string "\n" .text .globl _Z5printPfii .type _Z5printPfii, @function _Z5printPfii: .LFB2057: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $40, %rsp .cfi_def_cfa_offset 96 movq %rdi, 16(%rsp) movl %edx, 12(%rsp) testl %edx, %edx jle .L4 movl %esi, %r15d movl $0, %r14d movl $0, %r13d movslq %esi, %rax movq %rax, 24(%rsp) leaq .LC0(%rip), %r12 jmp .L5 .L7: movslq %r14d, %rax movq 16(%rsp), %rcx leaq (%rcx,%rax,4), %rbx movq 24(%rsp), %rdx addq %rdx, %rax leaq (%rcx,%rax,4), %rbp .L6: pxor %xmm0, %xmm0 cvtss2sd (%rbx), %xmm0 movq %r12, %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT addq $4, %rbx cmpq %rbp, %rbx jne .L6 .L8: leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addl $1, %r13d addl %r15d, %r14d cmpl %r13d, 12(%rsp) je .L4 .L5: testl %r15d, %r15d jg .L7 jmp .L8 .L4: leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addq $40, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2057: .size _Z5printPfii, 
.-_Z5printPfii .globl _Z28__device_stub__Z6kernelPfS_iPfS_i .type _Z28__device_stub__Z6kernelPfS_iPfS_i, @function _Z28__device_stub__Z6kernelPfS_iPfS_i: .LFB2083: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L15 .L11: movq 120(%rsp), %rax subq %fs:40, %rax jne .L16 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z6kernelPfS_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L11 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE2083: .size _Z28__device_stub__Z6kernelPfS_iPfS_i, .-_Z28__device_stub__Z6kernelPfS_iPfS_i .globl _Z6kernelPfS_i .type _Z6kernelPfS_i, @function _Z6kernelPfS_i: .LFB2084: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z28__device_stub__Z6kernelPfS_iPfS_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2084: .size _Z6kernelPfS_i, .-_Z6kernelPfS_i .globl main .type main, @function main: .LFB2058: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $64, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 56(%rsp) xorl %eax, %eax movl $400, %edi call malloc@PLT movq %rax, %rbx movl $400, %edi call malloc@PLT movq %rax, %rbp movl $0, %eax 
.L20: pxor %xmm0, %xmm0 cvtsi2ssl %eax, %xmm0 movss %xmm0, (%rbx,%rax,4) addq $1, %rax cmpq $100, %rax jne .L20 leaq 24(%rsp), %r12 leaq 8(%rsp), %rdi movl $10, %ecx movl $40, %edx movq %r12, %rsi call cudaMallocPitch@PLT leaq 16(%rsp), %rdi movl $10, %ecx movl $40, %edx movq %r12, %rsi call cudaMallocPitch@PLT movq 24(%rsp), %r12 subq $8, %rsp .cfi_def_cfa_offset 104 pushq $1 .cfi_def_cfa_offset 112 movl $10, %r9d movl $40, %r8d movl $40, %ecx movq %rbx, %rdx movq %r12, %rsi movq 24(%rsp), %rdi call cudaMemcpy2D@PLT movl $16, 48(%rsp) movl $16, 52(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) addq $16, %rsp .cfi_def_cfa_offset 96 movl $0, %r9d movl $0, %r8d movq 32(%rsp), %rdx movl $1, %ecx movq 44(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L25 .L21: subq $8, %rsp .cfi_def_cfa_offset 104 pushq $2 .cfi_def_cfa_offset 112 movl $10, %r9d movl $40, %r8d movq 40(%rsp), %rcx movq 32(%rsp), %rdx movl $40, %esi movq %rbp, %rdi call cudaMemcpy2D@PLT addq $16, %rsp .cfi_def_cfa_offset 96 movl $10, %edx movl $10, %esi movq %rbx, %rdi call _Z5printPfii movl $10, %edx movl $10, %esi movq %rbp, %rdi call _Z5printPfii movq 8(%rsp), %rdi call cudaFree@PLT movq 16(%rsp), %rdi call cudaFree@PLT movq %rbx, %rdi call free@PLT movq %rbp, %rdi call free@PLT movq 56(%rsp), %rax subq %fs:40, %rax jne .L26 movl $0, %eax addq $64, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L25: .cfi_restore_state movq %r12, %rdx shrq $2, %rdx movq 16(%rsp), %rsi movq 8(%rsp), %rdi call _Z28__device_stub__Z6kernelPfS_iPfS_i jmp .L21 .L26: call __stack_chk_fail@PLT .cfi_endproc .LFE2058: .size main, .-main .section .rodata.str1.1 .LC2: .string "_Z6kernelPfS_i" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2086: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call 
__cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC2(%rip), %rdx movq %rdx, %rcx leaq _Z6kernelPfS_i(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2086: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "AlingMemoryExample.hip" .globl _Z21__device_stub__kernelPfS_i # -- Begin function _Z21__device_stub__kernelPfS_i .p2align 4, 0x90 .type _Z21__device_stub__kernelPfS_i,@function _Z21__device_stub__kernelPfS_i: # @_Z21__device_stub__kernelPfS_i .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z6kernelPfS_i, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z21__device_stub__kernelPfS_i, .Lfunc_end0-_Z21__device_stub__kernelPfS_i .cfi_endproc # -- End function .globl _Z5printPfii # -- Begin function _Z5printPfii .p2align 4, 0x90 .type _Z5printPfii,@function _Z5printPfii: # @_Z5printPfii .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $24, %rsp .cfi_def_cfa_offset 80 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movq %rdi, 8(%rsp) # 8-byte Spill testl %edx, %edx jle .LBB1_6 # %bb.1: # %.preheader.lr.ph movl %esi, %ebx movl %edx, %eax movq %rax, 16(%rsp) # 8-byte Spill movl %esi, %r12d xorl %r13d, %r13d xorl %ebp, %ebp jmp .LBB1_2 .p2align 4, 0x90 .LBB1_5: # %._crit_edge # in Loop: Header=BB1_2 Depth=1 movl $10, %edi callq putchar@PLT incq %rbp addl %ebx, %r13d cmpq 16(%rsp), %rbp # 8-byte Folded Reload je .LBB1_6 .LBB1_2: # %.preheader # =>This Loop Header: 
Depth=1 # Child Loop BB1_4 Depth 2 testl %ebx, %ebx jle .LBB1_5 # %bb.3: # %.lr.ph # in Loop: Header=BB1_2 Depth=1 movl %r13d, %eax movq 8(%rsp), %rcx # 8-byte Reload leaq (%rcx,%rax,4), %r14 xorl %r15d, %r15d .p2align 4, 0x90 .LBB1_4: # Parent Loop BB1_2 Depth=1 # => This Inner Loop Header: Depth=2 movss (%r14,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str, %edi movb $1, %al callq printf incq %r15 cmpq %r15, %r12 jne .LBB1_4 jmp .LBB1_5 .LBB1_6: # %._crit_edge14 movl $10, %edi addq $24, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 jmp putchar@PLT # TAILCALL .Lfunc_end1: .size _Z5printPfii, .Lfunc_end1-_Z5printPfii .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r13 .cfi_def_cfa_offset 32 pushq %r12 .cfi_def_cfa_offset 40 pushq %rbx .cfi_def_cfa_offset 48 subq $144, %rsp .cfi_def_cfa_offset 192 .cfi_offset %rbx, -48 .cfi_offset %r12, -40 .cfi_offset %r13, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movl $400, %edi # imm = 0x190 callq malloc movq %rax, %rbx movl $400, %edi # imm = 0x190 callq malloc movq %rax, %r14 xorl %eax, %eax .p2align 4, 0x90 .LBB2_1: # =>This Inner Loop Header: Depth=1 xorps %xmm0, %xmm0 cvtsi2ss %eax, %xmm0 movss %xmm0, (%rbx,%rax,4) incq %rax cmpq $100, %rax jne .LBB2_1 # %bb.2: leaq 24(%rsp), %rdi leaq 40(%rsp), %r15 movl $40, %edx movl $10, %ecx movq %r15, %rsi callq hipMallocPitch leaq 16(%rsp), %rdi movl $40, %edx movl $10, %ecx movq %r15, %rsi callq hipMallocPitch movq 40(%rsp), %r15 movq 24(%rsp), %rdi movl $1, (%rsp) movl $40, %ecx movl $40, %r8d movl $10, %r9d movq %r15, %rsi movq %rbx, %rdx callq hipMemcpy2D movabsq $4294967297, %rdi # imm = 
0x100000001 movabsq $68719476752, %rdx # imm = 0x1000000010 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB2_4 # %bb.3: shrq $2, %r15 movq 24(%rsp), %rax movq 16(%rsp), %rcx movq %rax, 104(%rsp) movq %rcx, 96(%rsp) movl %r15d, 36(%rsp) leaq 104(%rsp), %rax movq %rax, 112(%rsp) leaq 96(%rsp), %rax movq %rax, 120(%rsp) leaq 36(%rsp), %rax movq %rax, 128(%rsp) leaq 80(%rsp), %rdi leaq 64(%rsp), %rsi leaq 56(%rsp), %rdx leaq 48(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rax movq 48(%rsp), %rdi movq 80(%rsp), %rsi movl 88(%rsp), %edx movq 64(%rsp), %rcx movl 72(%rsp), %r8d movq %rdi, 8(%rsp) movq %rax, (%rsp) leaq 112(%rsp), %r9 movl $_Z6kernelPfS_i, %edi callq hipLaunchKernel .LBB2_4: movq 16(%rsp), %rdx movq 40(%rsp), %rcx movl $2, (%rsp) movl $40, %esi movl $40, %r8d movl $10, %r9d movq %r14, %rdi callq hipMemcpy2D xorl %r15d, %r15d movq %rbx, %r12 .p2align 4, 0x90 .LBB2_5: # %.preheader.i # =>This Loop Header: Depth=1 # Child Loop BB2_6 Depth 2 xorl %r13d, %r13d .p2align 4, 0x90 .LBB2_6: # Parent Loop BB2_5 Depth=1 # => This Inner Loop Header: Depth=2 movss (%r12,%r13,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str, %edi movb $1, %al callq printf incq %r13 cmpq $10, %r13 jne .LBB2_6 # %bb.7: # %._crit_edge.i # in Loop: Header=BB2_5 Depth=1 movl $10, %edi callq putchar@PLT incq %r15 addq $40, %r12 cmpq $10, %r15 jne .LBB2_5 # %bb.8: # %_Z5printPfii.exit movl $10, %edi callq putchar@PLT xorl %r15d, %r15d movq %r14, %r12 .p2align 4, 0x90 .LBB2_9: # %.preheader.i20 # =>This Loop Header: Depth=1 # Child Loop BB2_10 Depth 2 xorl %r13d, %r13d .p2align 4, 0x90 .LBB2_10: # Parent Loop BB2_9 Depth=1 # => This Inner Loop Header: Depth=2 movss (%r12,%r13,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str, %edi movb $1, %al callq printf incq %r13 cmpq $10, %r13 jne .LBB2_10 # %bb.11: # %._crit_edge.i25 # in Loop: Header=BB2_9 Depth=1 movl 
$10, %edi callq putchar@PLT incq %r15 addq $40, %r12 cmpq $10, %r15 jne .LBB2_9 # %bb.12: # %_Z5printPfii.exit30 movl $10, %edi callq putchar@PLT movq 24(%rsp), %rdi callq hipFree movq 16(%rsp), %rdi callq hipFree movq %rbx, %rdi callq free movq %r14, %rdi callq free xorl %eax, %eax addq $144, %rsp .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .Lfunc_end2: .size main, .Lfunc_end2-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB3_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB3_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z6kernelPfS_i, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end3: .size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB4_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB4_2: retq .Lfunc_end4: .size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor .cfi_endproc # -- End function .type _Z6kernelPfS_i,@object # @_Z6kernelPfS_i .section .rodata,"a",@progbits .globl _Z6kernelPfS_i .p2align 3, 0x0 _Z6kernelPfS_i: .quad _Z21__device_stub__kernelPfS_i .size 
_Z6kernelPfS_i, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "%4.0f " .size .L.str, 7 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z6kernelPfS_i" .size .L__unnamed_1, 15 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z21__device_stub__kernelPfS_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z6kernelPfS_i .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
__global__ void vec_add_kernel(float *c, float *a, float *b, int n) { int i = 0; // Oops! Something is not right here, please fix it! if (i < n) { c[i] = a[i] + b[i]; } }
code for sm_80 Function : _Z14vec_add_kernelPfS_S_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ IMAD.MOV.U32 R0, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff007624 */ /* 0x000fca00078e00ff */ /*0020*/ ISETP.GE.AND P0, PT, R0, 0x1, PT ; /* 0x000000010000780c */ /* 0x000fda0003f06270 */ /*0030*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*0040*/ MOV R2, c[0x0][0x170] ; /* 0x00005c0000027a02 */ /* 0x000fe20000000f00 */ /*0050*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x16c] ; /* 0x00005b00ff057624 */ /* 0x000fe200078e00ff */ /*0060*/ MOV R4, c[0x0][0x168] ; /* 0x00005a0000047a02 */ /* 0x000fe20000000f00 */ /*0070*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x174] ; /* 0x00005d00ff037624 */ /* 0x000fe200078e00ff */ /*0080*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc60000000a00 */ /*0090*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */ /* 0x000ea8000c1e1900 */ /*00a0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea2000c1e1900 */ /*00b0*/ MOV R6, c[0x0][0x160] ; /* 0x0000580000067a02 */ /* 0x000fe40000000f00 */ /*00c0*/ MOV R7, c[0x0][0x164] ; /* 0x0000590000077a02 */ /* 0x000fe20000000f00 */ /*00d0*/ FADD R9, R2, R5 ; /* 0x0000000502097221 */ /* 0x004fca0000000000 */ /*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x000fe2000c101904 */ /*00f0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0100*/ BRA 0x100; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 
0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
__global__ void vec_add_kernel(float *c, float *a, float *b, int n) { int i = 0; // Oops! Something is not right here, please fix it! if (i < n) { c[i] = a[i] + b[i]; } }
.file "tmpxft_000e5502_00000000-6_vector-add-kernel.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z39__device_stub__Z14vec_add_kernelPfS_S_iPfS_S_i .type _Z39__device_stub__Z14vec_add_kernelPfS_S_iPfS_S_i, @function _Z39__device_stub__Z14vec_add_kernelPfS_S_iPfS_S_i: .LFB2051: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z14vec_add_kernelPfS_S_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z39__device_stub__Z14vec_add_kernelPfS_S_iPfS_S_i, .-_Z39__device_stub__Z14vec_add_kernelPfS_S_iPfS_S_i .globl _Z14vec_add_kernelPfS_S_i .type _Z14vec_add_kernelPfS_S_i, @function _Z14vec_add_kernelPfS_S_i: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp 
.cfi_def_cfa_offset 16 call _Z39__device_stub__Z14vec_add_kernelPfS_S_iPfS_S_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z14vec_add_kernelPfS_S_i, .-_Z14vec_add_kernelPfS_S_i .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z14vec_add_kernelPfS_S_i" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z14vec_add_kernelPfS_S_i(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
__global__ void vec_add_kernel(float *c, float *a, float *b, int n) { int i = 0; // Oops! Something is not right here, please fix it! if (i < n) { c[i] = a[i] + b[i]; } }
#include <hip/hip_runtime.h> __global__ void vec_add_kernel(float *c, float *a, float *b, int n) { int i = 0; // Oops! Something is not right here, please fix it! if (i < n) { c[i] = a[i] + b[i]; } }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h>

// Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, n).
// Launch with a 1-D grid; expects gridDim.x * blockDim.x >= n.
// Fix: the index was hard-coded to 0, so every thread (redundantly)
// computed only element 0 and the rest of the vectors were untouched.
__global__ void vec_add_kernel(float *c, float *a, float *b, int n) {
    // Global thread index: one thread per output element.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Bounds guard: the grid rarely divides n evenly.
    if (i < n) {
        c[i] = a[i] + b[i];
    }
}
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z14vec_add_kernelPfS_S_i .globl _Z14vec_add_kernelPfS_S_i .p2align 8 .type _Z14vec_add_kernelPfS_S_i,@function _Z14vec_add_kernelPfS_S_i: s_load_b32 s2, s[0:1], 0x18 s_waitcnt lgkmcnt(0) s_cmp_lt_i32 s2, 1 s_cbranch_scc1 .LBB0_2 s_clause 0x1 s_load_b128 s[4:7], s[0:1], 0x0 s_load_b64 s[0:1], s[0:1], 0x10 v_mov_b32_e32 v0, 0 s_waitcnt lgkmcnt(0) s_load_b32 s2, s[6:7], 0x0 s_load_b32 s0, s[0:1], 0x0 s_waitcnt lgkmcnt(0) v_add_f32_e64 v1, s2, s0 global_store_b32 v0, v1, s[4:5] .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z14vec_add_kernelPfS_S_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 28 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 2 .amdhsa_next_free_sgpr 8 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z14vec_add_kernelPfS_S_i, 
.Lfunc_end0-_Z14vec_add_kernelPfS_S_i .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 28 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z14vec_add_kernelPfS_S_i .private_segment_fixed_size: 0 .sgpr_count: 8 .sgpr_spill_count: 0 .symbol: _Z14vec_add_kernelPfS_S_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 2 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h>

// Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, n).
// Launch with a 1-D grid; expects gridDim.x * blockDim.x >= n.
// Fix: the index was hard-coded to 0, so every thread (redundantly)
// computed only element 0 and the rest of the vectors were untouched.
__global__ void vec_add_kernel(float *c, float *a, float *b, int n) {
    // Global thread index: one thread per output element.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Bounds guard: the grid rarely divides n evenly.
    if (i < n) {
        c[i] = a[i] + b[i];
    }
}
.text .file "vector-add-kernel.hip" .globl _Z29__device_stub__vec_add_kernelPfS_S_i # -- Begin function _Z29__device_stub__vec_add_kernelPfS_S_i .p2align 4, 0x90 .type _Z29__device_stub__vec_add_kernelPfS_S_i,@function _Z29__device_stub__vec_add_kernelPfS_S_i: # @_Z29__device_stub__vec_add_kernelPfS_S_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z14vec_add_kernelPfS_S_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z29__device_stub__vec_add_kernelPfS_S_i, .Lfunc_end0-_Z29__device_stub__vec_add_kernelPfS_S_i .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z14vec_add_kernelPfS_S_i, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor 
.type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z14vec_add_kernelPfS_S_i,@object # @_Z14vec_add_kernelPfS_S_i .section .rodata,"a",@progbits .globl _Z14vec_add_kernelPfS_S_i .p2align 3, 0x0 _Z14vec_add_kernelPfS_S_i: .quad _Z29__device_stub__vec_add_kernelPfS_S_i .size _Z14vec_add_kernelPfS_S_i, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z14vec_add_kernelPfS_S_i" .size .L__unnamed_1, 26 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z29__device_stub__vec_add_kernelPfS_S_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z14vec_add_kernelPfS_S_i .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z14vec_add_kernelPfS_S_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ IMAD.MOV.U32 R0, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff007624 */ /* 0x000fca00078e00ff */ /*0020*/ ISETP.GE.AND P0, PT, R0, 0x1, PT ; /* 0x000000010000780c */ /* 0x000fda0003f06270 */ /*0030*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*0040*/ MOV R2, c[0x0][0x170] ; /* 0x00005c0000027a02 */ /* 0x000fe20000000f00 */ /*0050*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x16c] ; /* 0x00005b00ff057624 */ /* 0x000fe200078e00ff */ /*0060*/ MOV R4, c[0x0][0x168] ; /* 0x00005a0000047a02 */ /* 0x000fe20000000f00 */ /*0070*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x174] ; /* 0x00005d00ff037624 */ /* 0x000fe200078e00ff */ /*0080*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc60000000a00 */ /*0090*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */ /* 0x000ea8000c1e1900 */ /*00a0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea2000c1e1900 */ /*00b0*/ MOV R6, c[0x0][0x160] ; /* 0x0000580000067a02 */ /* 0x000fe40000000f00 */ /*00c0*/ MOV R7, c[0x0][0x164] ; /* 0x0000590000077a02 */ /* 0x000fe20000000f00 */ /*00d0*/ FADD R9, R2, R5 ; /* 0x0000000502097221 */ /* 0x004fca0000000000 */ /*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x000fe2000c101904 */ /*00f0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0100*/ BRA 0x100; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 
0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z14vec_add_kernelPfS_S_i .globl _Z14vec_add_kernelPfS_S_i .p2align 8 .type _Z14vec_add_kernelPfS_S_i,@function _Z14vec_add_kernelPfS_S_i: s_load_b32 s2, s[0:1], 0x18 s_waitcnt lgkmcnt(0) s_cmp_lt_i32 s2, 1 s_cbranch_scc1 .LBB0_2 s_clause 0x1 s_load_b128 s[4:7], s[0:1], 0x0 s_load_b64 s[0:1], s[0:1], 0x10 v_mov_b32_e32 v0, 0 s_waitcnt lgkmcnt(0) s_load_b32 s2, s[6:7], 0x0 s_load_b32 s0, s[0:1], 0x0 s_waitcnt lgkmcnt(0) v_add_f32_e64 v1, s2, s0 global_store_b32 v0, v1, s[4:5] .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z14vec_add_kernelPfS_S_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 28 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 2 .amdhsa_next_free_sgpr 8 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z14vec_add_kernelPfS_S_i, 
.Lfunc_end0-_Z14vec_add_kernelPfS_S_i .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 28 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z14vec_add_kernelPfS_S_i .private_segment_fixed_size: 0 .sgpr_count: 8 .sgpr_spill_count: 0 .symbol: _Z14vec_add_kernelPfS_S_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 2 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_000e5502_00000000-6_vector-add-kernel.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z39__device_stub__Z14vec_add_kernelPfS_S_iPfS_S_i .type _Z39__device_stub__Z14vec_add_kernelPfS_S_iPfS_S_i, @function _Z39__device_stub__Z14vec_add_kernelPfS_S_iPfS_S_i: .LFB2051: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z14vec_add_kernelPfS_S_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z39__device_stub__Z14vec_add_kernelPfS_S_iPfS_S_i, .-_Z39__device_stub__Z14vec_add_kernelPfS_S_iPfS_S_i .globl _Z14vec_add_kernelPfS_S_i .type _Z14vec_add_kernelPfS_S_i, @function _Z14vec_add_kernelPfS_S_i: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp 
.cfi_def_cfa_offset 16 call _Z39__device_stub__Z14vec_add_kernelPfS_S_iPfS_S_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z14vec_add_kernelPfS_S_i, .-_Z14vec_add_kernelPfS_S_i .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z14vec_add_kernelPfS_S_i" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z14vec_add_kernelPfS_S_i(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "vector-add-kernel.hip" .globl _Z29__device_stub__vec_add_kernelPfS_S_i # -- Begin function _Z29__device_stub__vec_add_kernelPfS_S_i .p2align 4, 0x90 .type _Z29__device_stub__vec_add_kernelPfS_S_i,@function _Z29__device_stub__vec_add_kernelPfS_S_i: # @_Z29__device_stub__vec_add_kernelPfS_S_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z14vec_add_kernelPfS_S_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z29__device_stub__vec_add_kernelPfS_S_i, .Lfunc_end0-_Z29__device_stub__vec_add_kernelPfS_S_i .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z14vec_add_kernelPfS_S_i, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor 
.type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z14vec_add_kernelPfS_S_i,@object # @_Z14vec_add_kernelPfS_S_i .section .rodata,"a",@progbits .globl _Z14vec_add_kernelPfS_S_i .p2align 3, 0x0 _Z14vec_add_kernelPfS_S_i: .quad _Z29__device_stub__vec_add_kernelPfS_S_i .size _Z14vec_add_kernelPfS_S_i, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z14vec_add_kernelPfS_S_i" .size .L__unnamed_1, 26 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z29__device_stub__vec_add_kernelPfS_S_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z14vec_add_kernelPfS_S_i .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
extern "C" __global__ void sampleKernel(float** globalInputData, int size, float* globalOutputData) { const unsigned int tidX = threadIdx.x; globalOutputData[tidX] = 0; for (int i=0; i<size; i++) { globalOutputData[tidX] += globalInputData[tidX][i]; } __syncthreads(); }
code for sm_80 Function : sampleKernel .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */ /* 0x000e220000002100 */ /*0020*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff067624 */ /* 0x000fe200078e00ff */ /*0030*/ ULDC.64 UR8, c[0x0][0x118] ; /* 0x0000460000087ab9 */ /* 0x000fe20000000a00 */ /*0040*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */ /* 0x000fc600078e00ff */ /*0050*/ ISETP.GE.AND P0, PT, R6, 0x1, PT ; /* 0x000000010600780c */ /* 0x000fe20003f06270 */ /*0060*/ IMAD.WIDE.U32 R2, R0, R3, c[0x0][0x170] ; /* 0x00005c0000027625 */ /* 0x001fca00078e0003 */ /*0070*/ STG.E [R2.64], RZ ; /* 0x000000ff02007986 */ /* 0x0001ee000c101908 */ /*0080*/ @!P0 BRA 0xe70 ; /* 0x00000de000008947 */ /* 0x000fea0003800000 */ /*0090*/ IADD3 R5, R6, -0x1, RZ ; /* 0xffffffff06057810 */ /* 0x000fe20007ffe0ff */ /*00a0*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */ /* 0x000fe20008000000 */ /*00b0*/ LEA R4, P1, R0, c[0x0][0x160], 0x3 ; /* 0x0000580000047a11 */ /* 0x000fe200078218ff */ /*00c0*/ IMAD.MOV.U32 R13, RZ, RZ, RZ ; /* 0x000000ffff0d7224 */ /* 0x000fe200078e00ff */ /*00d0*/ ISETP.GE.U32.AND P0, PT, R5, 0x3, PT ; /* 0x000000030500780c */ /* 0x000fc40003f06070 */ /*00e0*/ LEA.HI.X R5, R0, c[0x0][0x164], RZ, 0x3, P1 ; /* 0x0000590000057a11 */ /* 0x000fe400008f1cff */ /*00f0*/ LOP3.LUT R0, R6, 0x3, RZ, 0xc0, !PT ; /* 0x0000000306007812 */ /* 0x000fd200078ec0ff */ /*0100*/ @!P0 BRA 0xd90 ; /* 0x00000c8000008947 */ /* 0x000fea0003800000 */ /*0110*/ IADD3 R6, -R0, c[0x0][0x168], RZ ; /* 0x00005a0000067a10 */ /* 0x000fe20007ffe1ff */ /*0120*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */ /* 0x000fe20008000000 */ /*0130*/ IMAD.MOV.U32 R13, RZ, RZ, RZ ; /* 0x000000ffff0d7224 */ /* 0x000fe200078e00ff */ /*0140*/ UMOV UR5, URZ ; /* 0x0000003f00057c82 */ /* 
0x000fe20008000000 */ /*0150*/ ISETP.GT.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */ /* 0x000fe20003f04270 */ /*0160*/ UMOV UR6, URZ ; /* 0x0000003f00067c82 */ /* 0x000fd80008000000 */ /*0170*/ @!P0 BRA 0xbb0 ; /* 0x00000a3000008947 */ /* 0x000fea0003800000 */ /*0180*/ ISETP.GT.AND P1, PT, R6, 0xc, PT ; /* 0x0000000c0600780c */ /* 0x000fe40003f24270 */ /*0190*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */ /* 0x000fd60003f0f070 */ /*01a0*/ @!P1 BRA 0x820 ; /* 0x0000067000009947 */ /* 0x000fea0003800000 */ /*01b0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*01c0*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x004ea4000c1e1b00 */ /*01d0*/ IADD3 R10, P1, R8, UR5, RZ ; /* 0x00000005080a7c10 */ /* 0x004fc8000ff3e0ff */ /*01e0*/ IADD3.X R11, R9, UR6, RZ, P1, !PT ; /* 0x00000006090b7c10 */ /* 0x000fca0008ffe4ff */ /*01f0*/ LDG.E R10, [R10.64] ; /* 0x000000080a0a7981 */ /* 0x000ea4000c1e1900 */ /*0200*/ FADD R7, R10, R13 ; /* 0x0000000d0a077221 */ /* 0x004fca0000000000 */ /*0210*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x0003e8000c101908 */ /*0220*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ea4000c1e1b00 */ /*0230*/ IADD3 R12, P1, R8, UR5, RZ ; /* 0x00000005080c7c10 */ /* 0x004fc8000ff3e0ff */ /*0240*/ IADD3.X R13, R9, UR6, RZ, P1, !PT ; /* 0x00000006090d7c10 */ /* 0x000fca0008ffe4ff */ /*0250*/ LDG.E R12, [R12.64+0x4] ; /* 0x000004080c0c7981 */ /* 0x000ea4000c1e1900 */ /*0260*/ FADD R17, R7, R12 ; /* 0x0000000c07117221 */ /* 0x004fca0000000000 */ /*0270*/ STG.E [R2.64], R17 ; /* 0x0000001102007986 */ /* 0x0005e8000c101908 */ /*0280*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*0290*/ IADD3 R14, P1, R8, UR5, RZ ; /* 0x00000005080e7c10 */ /* 0x008fc8000ff3e0ff */ /*02a0*/ IADD3.X R15, R9, UR6, RZ, P1, !PT ; /* 0x00000006090f7c10 */ /* 0x000fca0008ffe4ff */ /*02b0*/ LDG.E R14, [R14.64+0x8] ; /* 0x000008080e0e7981 */ /* 0x000e64000c1e1900 */ 
/*02c0*/ FADD R7, R17, R14 ; /* 0x0000000e11077221 */ /* 0x002fca0000000000 */ /*02d0*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x0003e8000c101908 */ /*02e0*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*02f0*/ IADD3 R10, P1, R8, UR5, RZ ; /* 0x00000005080a7c10 */ /* 0x008fc8000ff3e0ff */ /*0300*/ IADD3.X R11, R9, UR6, RZ, P1, !PT ; /* 0x00000006090b7c10 */ /* 0x000fca0008ffe4ff */ /*0310*/ LDG.E R10, [R10.64+0xc] ; /* 0x00000c080a0a7981 */ /* 0x000ea4000c1e1900 */ /*0320*/ FADD R17, R7, R10 ; /* 0x0000000a07117221 */ /* 0x004fca0000000000 */ /*0330*/ STG.E [R2.64], R17 ; /* 0x0000001102007986 */ /* 0x0005e8000c101908 */ /*0340*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*0350*/ IADD3 R12, P1, R8, UR5, RZ ; /* 0x00000005080c7c10 */ /* 0x008fc8000ff3e0ff */ /*0360*/ IADD3.X R13, R9, UR6, RZ, P1, !PT ; /* 0x00000006090d7c10 */ /* 0x000fca0008ffe4ff */ /*0370*/ LDG.E R12, [R12.64+0x10] ; /* 0x000010080c0c7981 */ /* 0x000e64000c1e1900 */ /*0380*/ FADD R7, R17, R12 ; /* 0x0000000c11077221 */ /* 0x002fca0000000000 */ /*0390*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x0003e8000c101908 */ /*03a0*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*03b0*/ IADD3 R14, P1, R8, UR5, RZ ; /* 0x00000005080e7c10 */ /* 0x008fc8000ff3e0ff */ /*03c0*/ IADD3.X R15, R9, UR6, RZ, P1, !PT ; /* 0x00000006090f7c10 */ /* 0x000fca0008ffe4ff */ /*03d0*/ LDG.E R14, [R14.64+0x14] ; /* 0x000014080e0e7981 */ /* 0x000ea4000c1e1900 */ /*03e0*/ FADD R17, R7, R14 ; /* 0x0000000e07117221 */ /* 0x004fca0000000000 */ /*03f0*/ STG.E [R2.64], R17 ; /* 0x0000001102007986 */ /* 0x0005e8000c101908 */ /*0400*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*0410*/ IADD3 R10, P1, R8, UR5, RZ ; /* 0x00000005080a7c10 */ /* 0x008fc8000ff3e0ff */ /*0420*/ IADD3.X R11, R9, UR6, RZ, P1, !PT ; /* 0x00000006090b7c10 */ /* 0x000fca0008ffe4ff */ /*0430*/ LDG.E R10, [R10.64+0x18] ; /* 
0x000018080a0a7981 */ /* 0x000e64000c1e1900 */ /*0440*/ FADD R7, R17, R10 ; /* 0x0000000a11077221 */ /* 0x002fca0000000000 */ /*0450*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x0003e8000c101908 */ /*0460*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*0470*/ IADD3 R12, P1, R8, UR5, RZ ; /* 0x00000005080c7c10 */ /* 0x008fc8000ff3e0ff */ /*0480*/ IADD3.X R13, R9, UR6, RZ, P1, !PT ; /* 0x00000006090d7c10 */ /* 0x000fca0008ffe4ff */ /*0490*/ LDG.E R12, [R12.64+0x1c] ; /* 0x00001c080c0c7981 */ /* 0x000ea4000c1e1900 */ /*04a0*/ FADD R17, R7, R12 ; /* 0x0000000c07117221 */ /* 0x004fca0000000000 */ /*04b0*/ STG.E [R2.64], R17 ; /* 0x0000001102007986 */ /* 0x0005e8000c101908 */ /*04c0*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*04d0*/ IADD3 R14, P1, R8, UR5, RZ ; /* 0x00000005080e7c10 */ /* 0x008fc8000ff3e0ff */ /*04e0*/ IADD3.X R15, R9, UR6, RZ, P1, !PT ; /* 0x00000006090f7c10 */ /* 0x000fca0008ffe4ff */ /*04f0*/ LDG.E R14, [R14.64+0x20] ; /* 0x000020080e0e7981 */ /* 0x000e64000c1e1900 */ /*0500*/ FADD R7, R17, R14 ; /* 0x0000000e11077221 */ /* 0x002fca0000000000 */ /*0510*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x0003e8000c101908 */ /*0520*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*0530*/ IADD3 R10, P1, R8, UR5, RZ ; /* 0x00000005080a7c10 */ /* 0x008fc8000ff3e0ff */ /*0540*/ IADD3.X R11, R9, UR6, RZ, P1, !PT ; /* 0x00000006090b7c10 */ /* 0x000fca0008ffe4ff */ /*0550*/ LDG.E R10, [R10.64+0x24] ; /* 0x000024080a0a7981 */ /* 0x000ea4000c1e1900 */ /*0560*/ FADD R17, R7, R10 ; /* 0x0000000a07117221 */ /* 0x004fca0000000000 */ /*0570*/ STG.E [R2.64], R17 ; /* 0x0000001102007986 */ /* 0x0005e8000c101908 */ /*0580*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*0590*/ IADD3 R12, P1, R8, UR5, RZ ; /* 0x00000005080c7c10 */ /* 0x008fc8000ff3e0ff */ /*05a0*/ IADD3.X R13, R9, UR6, RZ, P1, !PT ; /* 0x00000006090d7c10 */ /* 0x000fca0008ffe4ff */ 
/*05b0*/ LDG.E R12, [R12.64+0x28] ; /* 0x000028080c0c7981 */ /* 0x000e64000c1e1900 */ /*05c0*/ FADD R7, R17, R12 ; /* 0x0000000c11077221 */ /* 0x002fca0000000000 */ /*05d0*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x0003e8000c101908 */ /*05e0*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*05f0*/ IADD3 R14, P1, R8, UR5, RZ ; /* 0x00000005080e7c10 */ /* 0x008fc8000ff3e0ff */ /*0600*/ IADD3.X R15, R9, UR6, RZ, P1, !PT ; /* 0x00000006090f7c10 */ /* 0x000fca0008ffe4ff */ /*0610*/ LDG.E R14, [R14.64+0x2c] ; /* 0x00002c080e0e7981 */ /* 0x000ea4000c1e1900 */ /*0620*/ FADD R17, R7, R14 ; /* 0x0000000e07117221 */ /* 0x004fca0000000000 */ /*0630*/ STG.E [R2.64], R17 ; /* 0x0000001102007986 */ /* 0x0005e8000c101908 */ /*0640*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*0650*/ IADD3 R10, P1, R8, UR5, RZ ; /* 0x00000005080a7c10 */ /* 0x008fc8000ff3e0ff */ /*0660*/ IADD3.X R11, R9, UR6, RZ, P1, !PT ; /* 0x00000006090b7c10 */ /* 0x000fca0008ffe4ff */ /*0670*/ LDG.E R10, [R10.64+0x30] ; /* 0x000030080a0a7981 */ /* 0x000e64000c1e1900 */ /*0680*/ FADD R7, R17, R10 ; /* 0x0000000a11077221 */ /* 0x002fca0000000000 */ /*0690*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x0003e8000c101908 */ /*06a0*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*06b0*/ IADD3 R12, P1, R8, UR5, RZ ; /* 0x00000005080c7c10 */ /* 0x008fc8000ff3e0ff */ /*06c0*/ IADD3.X R13, R9, UR6, RZ, P1, !PT ; /* 0x00000006090d7c10 */ /* 0x000fca0008ffe4ff */ /*06d0*/ LDG.E R12, [R12.64+0x34] ; /* 0x000034080c0c7981 */ /* 0x000ea4000c1e1900 */ /*06e0*/ FADD R17, R7, R12 ; /* 0x0000000c07117221 */ /* 0x004fca0000000000 */ /*06f0*/ STG.E [R2.64], R17 ; /* 0x0000001102007986 */ /* 0x0005e8000c101908 */ /*0700*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*0710*/ IADD3 R14, P1, R8, UR5, RZ ; /* 0x00000005080e7c10 */ /* 0x008fc8000ff3e0ff */ /*0720*/ IADD3.X R15, R9, UR6, RZ, P1, !PT ; /* 
0x00000006090f7c10 */ /* 0x000fca0008ffe4ff */ /*0730*/ LDG.E R14, [R14.64+0x38] ; /* 0x000038080e0e7981 */ /* 0x000e64000c1e1900 */ /*0740*/ FADD R7, R17, R14 ; /* 0x0000000e11077221 */ /* 0x002fca0000000000 */ /*0750*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x0005e8000c101908 */ /*0760*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee2000c1e1b00 */ /*0770*/ IADD3 R6, R6, -0x10, RZ ; /* 0xfffffff006067810 */ /* 0x000fe40007ffe0ff */ /*0780*/ IADD3 R8, P1, R8, UR5, RZ ; /* 0x0000000508087c10 */ /* 0x008fc8000ff3e0ff */ /*0790*/ IADD3.X R9, R9, UR6, RZ, P1, !PT ; /* 0x0000000609097c10 */ /* 0x000fca0008ffe4ff */ /*07a0*/ LDG.E R8, [R8.64+0x3c] ; /* 0x00003c0808087981 */ /* 0x000ee2000c1e1900 */ /*07b0*/ ISETP.GT.AND P1, PT, R6, 0xc, PT ; /* 0x0000000c0600780c */ /* 0x000fe20003f24270 */ /*07c0*/ UIADD3 UR5, UP0, UR5, 0x40, URZ ; /* 0x0000004005057890 */ /* 0x000fe4000ff1e03f */ /*07d0*/ UIADD3 UR4, UR4, 0x10, URZ ; /* 0x0000001004047890 */ /* 0x000fe4000fffe03f */ /*07e0*/ UIADD3.X UR6, URZ, UR6, URZ, UP0, !UPT ; /* 0x000000063f067290 */ /* 0x000fe200087fe43f */ /*07f0*/ FADD R13, R7, R8 ; /* 0x00000008070d7221 */ /* 0x008fca0000000000 */ /*0800*/ STG.E [R2.64], R13 ; /* 0x0000000d02007986 */ /* 0x0005e4000c101908 */ /*0810*/ @P1 BRA 0x1c0 ; /* 0xfffff9a000001947 */ /* 0x000fea000383ffff */ /*0820*/ ISETP.GT.AND P1, PT, R6, 0x4, PT ; /* 0x000000040600780c */ /* 0x000fda0003f24270 */ /*0830*/ @!P1 BRA 0xb90 ; /* 0x0000035000009947 */ /* 0x000fea0003800000 */ /*0840*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*0850*/ IADD3 R10, P0, R8, UR5, RZ ; /* 0x00000005080a7c10 */ /* 0x008fc8000ff1e0ff */ /*0860*/ IADD3.X R11, R9, UR6, RZ, P0, !PT ; /* 0x00000006090b7c10 */ /* 0x000fca00087fe4ff */ /*0870*/ LDG.E R10, [R10.64] ; /* 0x000000080a0a7981 */ /* 0x000ee4000c1e1900 */ /*0880*/ FADD R7, R13, R10 ; /* 0x0000000a0d077221 */ /* 0x00cfca0000000000 */ /*0890*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 
0x0003e8000c101908 */ /*08a0*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ea4000c1e1b00 */ /*08b0*/ IADD3 R12, P0, R8, UR5, RZ ; /* 0x00000005080c7c10 */ /* 0x004fc8000ff1e0ff */ /*08c0*/ IADD3.X R13, R9, UR6, RZ, P0, !PT ; /* 0x00000006090d7c10 */ /* 0x000fca00087fe4ff */ /*08d0*/ LDG.E R12, [R12.64+0x4] ; /* 0x000004080c0c7981 */ /* 0x000ea4000c1e1900 */ /*08e0*/ FADD R17, R7, R12 ; /* 0x0000000c07117221 */ /* 0x004fca0000000000 */ /*08f0*/ STG.E [R2.64], R17 ; /* 0x0000001102007986 */ /* 0x0005e8000c101908 */ /*0900*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*0910*/ IADD3 R14, P0, R8, UR5, RZ ; /* 0x00000005080e7c10 */ /* 0x008fc8000ff1e0ff */ /*0920*/ IADD3.X R15, R9, UR6, RZ, P0, !PT ; /* 0x00000006090f7c10 */ /* 0x000fca00087fe4ff */ /*0930*/ LDG.E R14, [R14.64+0x8] ; /* 0x000008080e0e7981 */ /* 0x000e64000c1e1900 */ /*0940*/ FADD R7, R17, R14 ; /* 0x0000000e11077221 */ /* 0x002fca0000000000 */ /*0950*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x0003e8000c101908 */ /*0960*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*0970*/ IADD3 R10, P0, R8, UR5, RZ ; /* 0x00000005080a7c10 */ /* 0x008fc8000ff1e0ff */ /*0980*/ IADD3.X R11, R9, UR6, RZ, P0, !PT ; /* 0x00000006090b7c10 */ /* 0x000fca00087fe4ff */ /*0990*/ LDG.E R10, [R10.64+0xc] ; /* 0x00000c080a0a7981 */ /* 0x000ea4000c1e1900 */ /*09a0*/ FADD R17, R7, R10 ; /* 0x0000000a07117221 */ /* 0x004fca0000000000 */ /*09b0*/ STG.E [R2.64], R17 ; /* 0x0000001102007986 */ /* 0x0005e8000c101908 */ /*09c0*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*09d0*/ IADD3 R12, P0, R8, UR5, RZ ; /* 0x00000005080c7c10 */ /* 0x008fc8000ff1e0ff */ /*09e0*/ IADD3.X R13, R9, UR6, RZ, P0, !PT ; /* 0x00000006090d7c10 */ /* 0x000fca00087fe4ff */ /*09f0*/ LDG.E R12, [R12.64+0x10] ; /* 0x000010080c0c7981 */ /* 0x000e64000c1e1900 */ /*0a00*/ FADD R7, R17, R12 ; /* 0x0000000c11077221 */ /* 0x002fca0000000000 */ /*0a10*/ STG.E 
[R2.64], R7 ; /* 0x0000000702007986 */ /* 0x0003e8000c101908 */ /*0a20*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*0a30*/ IADD3 R14, P0, R8, UR5, RZ ; /* 0x00000005080e7c10 */ /* 0x008fc8000ff1e0ff */ /*0a40*/ IADD3.X R15, R9, UR6, RZ, P0, !PT ; /* 0x00000006090f7c10 */ /* 0x000fca00087fe4ff */ /*0a50*/ LDG.E R14, [R14.64+0x14] ; /* 0x000014080e0e7981 */ /* 0x000ea4000c1e1900 */ /*0a60*/ FADD R17, R7, R14 ; /* 0x0000000e07117221 */ /* 0x004fca0000000000 */ /*0a70*/ STG.E [R2.64], R17 ; /* 0x0000001102007986 */ /* 0x0005e8000c101908 */ /*0a80*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*0a90*/ IADD3 R10, P0, R8, UR5, RZ ; /* 0x00000005080a7c10 */ /* 0x008fc8000ff1e0ff */ /*0aa0*/ IADD3.X R11, R9, UR6, RZ, P0, !PT ; /* 0x00000006090b7c10 */ /* 0x000fca00087fe4ff */ /*0ab0*/ LDG.E R10, [R10.64+0x18] ; /* 0x000018080a0a7981 */ /* 0x000e64000c1e1900 */ /*0ac0*/ FADD R7, R17, R10 ; /* 0x0000000a11077221 */ /* 0x002fca0000000000 */ /*0ad0*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x0005e8000c101908 */ /*0ae0*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*0af0*/ IADD3 R8, P0, R8, UR5, RZ ; /* 0x0000000508087c10 */ /* 0x008fc8000ff1e0ff */ /*0b00*/ IADD3.X R9, R9, UR6, RZ, P0, !PT ; /* 0x0000000609097c10 */ /* 0x000fca00087fe4ff */ /*0b10*/ LDG.E R8, [R8.64+0x1c] ; /* 0x00001c0808087981 */ /* 0x000ee2000c1e1900 */ /*0b20*/ UIADD3 UR5, UP0, UR5, 0x20, URZ ; /* 0x0000002005057890 */ /* 0x000fe2000ff1e03f */ /*0b30*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe20003f0e170 */ /*0b40*/ UIADD3 UR4, UR4, 0x8, URZ ; /* 0x0000000804047890 */ /* 0x000fe2000fffe03f */ /*0b50*/ IADD3 R6, R6, -0x8, RZ ; /* 0xfffffff806067810 */ /* 0x000fe20007ffe0ff */ /*0b60*/ UIADD3.X UR6, URZ, UR6, URZ, UP0, !UPT ; /* 0x000000063f067290 */ /* 0x000fe200087fe43f */ /*0b70*/ FADD R13, R7, R8 ; /* 0x00000008070d7221 */ /* 0x008fca0000000000 */ /*0b80*/ STG.E [R2.64], R13 
; /* 0x0000000d02007986 */ /* 0x0005e8000c101908 */ /*0b90*/ ISETP.NE.OR P0, PT, R6, RZ, P0 ; /* 0x000000ff0600720c */ /* 0x000fda0000705670 */ /*0ba0*/ @!P0 BRA 0xd90 ; /* 0x000001e000008947 */ /* 0x000fea0003800000 */ /*0bb0*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*0bc0*/ IADD3 R10, P0, R8, UR5, RZ ; /* 0x00000005080a7c10 */ /* 0x008fc8000ff1e0ff */ /*0bd0*/ IADD3.X R11, R9, UR6, RZ, P0, !PT ; /* 0x00000006090b7c10 */ /* 0x000fca00087fe4ff */ /*0be0*/ LDG.E R10, [R10.64] ; /* 0x000000080a0a7981 */ /* 0x000ee4000c1e1900 */ /*0bf0*/ FADD R7, R10, R13 ; /* 0x0000000d0a077221 */ /* 0x00cfca0000000000 */ /*0c00*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x0003e8000c101908 */ /*0c10*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ea4000c1e1b00 */ /*0c20*/ IADD3 R12, P0, R8, UR5, RZ ; /* 0x00000005080c7c10 */ /* 0x004fc8000ff1e0ff */ /*0c30*/ IADD3.X R13, R9, UR6, RZ, P0, !PT ; /* 0x00000006090d7c10 */ /* 0x000fca00087fe4ff */ /*0c40*/ LDG.E R12, [R12.64+0x4] ; /* 0x000004080c0c7981 */ /* 0x000ea4000c1e1900 */ /*0c50*/ FADD R17, R7, R12 ; /* 0x0000000c07117221 */ /* 0x004fca0000000000 */ /*0c60*/ STG.E [R2.64], R17 ; /* 0x0000001102007986 */ /* 0x0005e8000c101908 */ /*0c70*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*0c80*/ IADD3 R14, P0, R8, UR5, RZ ; /* 0x00000005080e7c10 */ /* 0x008fc8000ff1e0ff */ /*0c90*/ IADD3.X R15, R9, UR6, RZ, P0, !PT ; /* 0x00000006090f7c10 */ /* 0x000fca00087fe4ff */ /*0ca0*/ LDG.E R14, [R14.64+0x8] ; /* 0x000008080e0e7981 */ /* 0x000e64000c1e1900 */ /*0cb0*/ FADD R7, R17, R14 ; /* 0x0000000e11077221 */ /* 0x002fca0000000000 */ /*0cc0*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x0005e8000c101908 */ /*0cd0*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee2000c1e1b00 */ /*0ce0*/ IADD3 R6, R6, -0x4, RZ ; /* 0xfffffffc06067810 */ /* 0x000fe40007ffe0ff */ /*0cf0*/ IADD3 R8, P0, R8, UR5, RZ ; /* 0x0000000508087c10 */ /* 0x008fc8000ff1e0ff */ 
/*0d00*/ IADD3.X R9, R9, UR6, RZ, P0, !PT ; /* 0x0000000609097c10 */ /* 0x000fca00087fe4ff */ /*0d10*/ LDG.E R8, [R8.64+0xc] ; /* 0x00000c0808087981 */ /* 0x000ee2000c1e1900 */ /*0d20*/ ISETP.NE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */ /* 0x000fe20003f05270 */ /*0d30*/ UIADD3 UR5, UP0, UR5, 0x10, URZ ; /* 0x0000001005057890 */ /* 0x000fe4000ff1e03f */ /*0d40*/ UIADD3 UR4, UR4, 0x4, URZ ; /* 0x0000000404047890 */ /* 0x000fe4000fffe03f */ /*0d50*/ UIADD3.X UR6, URZ, UR6, URZ, UP0, !UPT ; /* 0x000000063f067290 */ /* 0x000fe200087fe43f */ /*0d60*/ FADD R13, R7, R8 ; /* 0x00000008070d7221 */ /* 0x008fca0000000000 */ /*0d70*/ STG.E [R2.64], R13 ; /* 0x0000000d02007986 */ /* 0x0005e4000c101908 */ /*0d80*/ @P0 BRA 0xbb0 ; /* 0xfffffe2000000947 */ /* 0x005fea000383ffff */ /*0d90*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */ /* 0x000fda0003f05270 */ /*0da0*/ @!P0 BRA 0xe70 ; /* 0x000000c000008947 */ /* 0x000fea0003800000 */ /*0db0*/ UIMAD.WIDE UR4, UR4, 0x4, URZ ; /* 0x00000004040478a5 */ /* 0x000fc6000f8e023f */ /*0dc0*/ LDG.E.64 R6, [R4.64] ; /* 0x0000000804067981 */ /* 0x004ea2000c1e1b00 */ /*0dd0*/ IADD3 R0, R0, -0x1, RZ ; /* 0xffffffff00007810 */ /* 0x000fe40007ffe0ff */ /*0de0*/ IADD3 R6, P0, R6, UR4, RZ ; /* 0x0000000406067c10 */ /* 0x004fc8000ff1e0ff */ /*0df0*/ IADD3.X R7, R7, UR5, RZ, P0, !PT ; /* 0x0000000507077c10 */ /* 0x000fca00087fe4ff */ /*0e00*/ LDG.E R6, [R6.64] ; /* 0x0000000806067981 */ /* 0x000ea2000c1e1900 */ /*0e10*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */ /* 0x000fe20003f05270 */ /*0e20*/ UIADD3 UR4, UP0, UR4, 0x4, URZ ; /* 0x0000000404047890 */ /* 0x000fc8000ff1e03f */ /*0e30*/ UIADD3.X UR5, URZ, UR5, URZ, UP0, !UPT ; /* 0x000000053f057290 */ /* 0x000fe200087fe43f */ /*0e40*/ FADD R13, R6, R13 ; /* 0x0000000d060d7221 */ /* 0x006fca0000000000 */ /*0e50*/ STG.E [R2.64], R13 ; /* 0x0000000d02007986 */ /* 0x0003e4000c101908 */ /*0e60*/ @P0 BRA 0xdc0 ; /* 0xffffff5000000947 */ /* 0x000fea000383ffff */ /*0e70*/ 
BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*0e80*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0e90*/ BRA 0xe90; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0ea0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0eb0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0ec0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0ed0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0ee0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0ef0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0f00*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0f10*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0f20*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0f30*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0f40*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0f50*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0f60*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0f70*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
// Device kernel: per-thread row sum.
// Thread tidX (threadIdx.x only — no blockIdx term, so indexing is single-block) first
// zeroes globalOutputData[tidX], then accumulates globalInputData[tidX][0..size-1] into it
// via repeated read-modify-write of global memory, and finally hits a block-wide barrier.
// NOTE(review): no bounds guard — assumes blockDim.x matches the output length and every
// input row holds at least `size` floats; confirm against the launch configuration.
extern "C" __global__ void sampleKernel(float** globalInputData, int size, float* globalOutputData) { const unsigned int tidX = threadIdx.x; globalOutputData[tidX] = 0; for (int i=0; i<size; i++) { globalOutputData[tidX] += globalInputData[tidX][i]; } __syncthreads(); }
.file "tmpxft_000d5526_00000000-6_JCudaSampleKernel.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z36__device_stub__Z12sampleKernelPPfiS_PPfiS_ .type _Z36__device_stub__Z12sampleKernelPPfiS_PPfiS_, @function _Z36__device_stub__Z12sampleKernelPPfiS_PPfiS_: .LFB2051: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movl %esi, 20(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 20(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 120(%rsp), %rax subq %fs:40, %rax jne .L8 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq sampleKernel(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z36__device_stub__Z12sampleKernelPPfiS_PPfiS_, .-_Z36__device_stub__Z12sampleKernelPPfiS_PPfiS_ .globl sampleKernel .type sampleKernel, @function sampleKernel: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z36__device_stub__Z12sampleKernelPPfiS_PPfiS_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc 
.LFE2052: .size sampleKernel, .-sampleKernel .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "sampleKernel" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq sampleKernel(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
// Device kernel: per-thread row sum (CUDA form; conversion-pair input).
// Each thread tidX = threadIdx.x zeroes globalOutputData[tidX], then adds the `size`
// floats of row globalInputData[tidX] into it one by one (global-memory read-modify-write
// per iteration), then synchronizes the block.
// NOTE(review): assumes tidX is a valid index into both the output array and the row-pointer
// array — no guard in the code; verify the host-side launch dimensions.
extern "C" __global__ void sampleKernel(float** globalInputData, int size, float* globalOutputData) { const unsigned int tidX = threadIdx.x; globalOutputData[tidX] = 0; for (int i=0; i<size; i++) { globalOutputData[tidX] += globalInputData[tidX][i]; } __syncthreads(); }
// HIP port of the CUDA row-sum kernel: only the hip_runtime.h include is added; the
// kernel body (threadIdx.x indexing, global-memory accumulation, __syncthreads barrier)
// is byte-identical to the CUDA original.
// NOTE(review): the include and the kernel share one physical line here — flattening
// artifact of the surrounding dump; the include must sit on its own line to compile.
#include <hip/hip_runtime.h> extern "C" __global__ void sampleKernel(float** globalInputData, int size, float* globalOutputData) { const unsigned int tidX = threadIdx.x; globalOutputData[tidX] = 0; for (int i=0; i<size; i++) { globalOutputData[tidX] += globalInputData[tidX][i]; } __syncthreads(); }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
// HIP kernel (input of the HIP -> AMDGCN device-assembly pair that follows).
// Per-thread tidX: clear globalOutputData[tidX], accumulate globalInputData[tidX][i] for
// i in [0, size), then block barrier. Accumulation target is global memory on every
// iteration, as written — the paired gfx1100 listing reflects exactly this source.
#include <hip/hip_runtime.h> extern "C" __global__ void sampleKernel(float** globalInputData, int size, float* globalOutputData) { const unsigned int tidX = threadIdx.x; globalOutputData[tidX] = 0; for (int i=0; i<size; i++) { globalOutputData[tidX] += globalInputData[tidX][i]; } __syncthreads(); }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected sampleKernel .globl sampleKernel .p2align 8 .type sampleKernel,@function sampleKernel: s_clause 0x1 s_load_b32 s4, s[0:1], 0x8 s_load_b64 s[2:3], s[0:1], 0x10 v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v1, 2, v0 s_waitcnt lgkmcnt(0) s_cmp_lt_i32 s4, 1 global_store_b32 v1, v2, s[2:3] s_cbranch_scc1 .LBB0_3 s_load_b64 s[0:1], s[0:1], 0x0 v_lshlrev_b32_e32 v2, 3, v0 v_add_co_u32 v0, s2, s2, v1 s_delay_alu instid0(VALU_DEP_1) v_add_co_ci_u32_e64 v1, null, s3, 0, s2 s_waitcnt lgkmcnt(0) global_load_b64 v[2:3], v2, s[0:1] global_load_b32 v4, v[0:1], off .LBB0_2: s_waitcnt vmcnt(1) flat_load_b32 v5, v[2:3] v_add_co_u32 v2, vcc_lo, v2, 4 v_add_co_ci_u32_e32 v3, vcc_lo, 0, v3, vcc_lo s_add_i32 s4, s4, -1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u32 s4, 0 s_waitcnt vmcnt(0) lgkmcnt(0) v_add_f32_e32 v4, v5, v4 global_store_b32 v[0:1], v4, off s_cbranch_scc0 .LBB0_2 .LBB0_3: s_waitcnt_vscnt null, 0x0 s_barrier buffer_gl0_inv s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel sampleKernel .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 24 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 5 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 
0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size sampleKernel, .Lfunc_end0-sampleKernel .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 24 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: sampleKernel .private_segment_fixed_size: 0 .sgpr_count: 7 .sgpr_spill_count: 0 .symbol: sampleKernel.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
// HIP kernel (input of the HIP -> x86-64 host-assembly pair that follows).
// Identical body to the other sampleKernel copies in this dump: thread tidX zeroes its
// output slot, sums `size` floats from its input row into it, then __syncthreads().
// NOTE(review): no bounds checks — launch contract (blockDim.x vs. array lengths) is
// assumed, not enforced; confirm on the host side.
#include <hip/hip_runtime.h> extern "C" __global__ void sampleKernel(float** globalInputData, int size, float* globalOutputData) { const unsigned int tidX = threadIdx.x; globalOutputData[tidX] = 0; for (int i=0; i<size; i++) { globalOutputData[tidX] += globalInputData[tidX][i]; } __syncthreads(); }
.text .file "JCudaSampleKernel.hip" .globl __device_stub__sampleKernel # -- Begin function __device_stub__sampleKernel .p2align 4, 0x90 .type __device_stub__sampleKernel,@function __device_stub__sampleKernel: # @__device_stub__sampleKernel .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movl %esi, 12(%rsp) movq %rdx, 64(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 12(%rsp), %rax movq %rax, 88(%rsp) leaq 64(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $sampleKernel, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size __device_stub__sampleKernel, .Lfunc_end0-__device_stub__sampleKernel .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $sampleKernel, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: 
pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type sampleKernel,@object # @sampleKernel .section .rodata,"a",@progbits .globl sampleKernel .p2align 3, 0x0 sampleKernel: .quad __device_stub__sampleKernel .size sampleKernel, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "sampleKernel" .size .L__unnamed_1, 13 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __device_stub__sampleKernel .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym sampleKernel .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : sampleKernel .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */ /* 0x000e220000002100 */ /*0020*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff067624 */ /* 0x000fe200078e00ff */ /*0030*/ ULDC.64 UR8, c[0x0][0x118] ; /* 0x0000460000087ab9 */ /* 0x000fe20000000a00 */ /*0040*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */ /* 0x000fc600078e00ff */ /*0050*/ ISETP.GE.AND P0, PT, R6, 0x1, PT ; /* 0x000000010600780c */ /* 0x000fe20003f06270 */ /*0060*/ IMAD.WIDE.U32 R2, R0, R3, c[0x0][0x170] ; /* 0x00005c0000027625 */ /* 0x001fca00078e0003 */ /*0070*/ STG.E [R2.64], RZ ; /* 0x000000ff02007986 */ /* 0x0001ee000c101908 */ /*0080*/ @!P0 BRA 0xe70 ; /* 0x00000de000008947 */ /* 0x000fea0003800000 */ /*0090*/ IADD3 R5, R6, -0x1, RZ ; /* 0xffffffff06057810 */ /* 0x000fe20007ffe0ff */ /*00a0*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */ /* 0x000fe20008000000 */ /*00b0*/ LEA R4, P1, R0, c[0x0][0x160], 0x3 ; /* 0x0000580000047a11 */ /* 0x000fe200078218ff */ /*00c0*/ IMAD.MOV.U32 R13, RZ, RZ, RZ ; /* 0x000000ffff0d7224 */ /* 0x000fe200078e00ff */ /*00d0*/ ISETP.GE.U32.AND P0, PT, R5, 0x3, PT ; /* 0x000000030500780c */ /* 0x000fc40003f06070 */ /*00e0*/ LEA.HI.X R5, R0, c[0x0][0x164], RZ, 0x3, P1 ; /* 0x0000590000057a11 */ /* 0x000fe400008f1cff */ /*00f0*/ LOP3.LUT R0, R6, 0x3, RZ, 0xc0, !PT ; /* 0x0000000306007812 */ /* 0x000fd200078ec0ff */ /*0100*/ @!P0 BRA 0xd90 ; /* 0x00000c8000008947 */ /* 0x000fea0003800000 */ /*0110*/ IADD3 R6, -R0, c[0x0][0x168], RZ ; /* 0x00005a0000067a10 */ /* 0x000fe20007ffe1ff */ /*0120*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */ /* 0x000fe20008000000 */ /*0130*/ IMAD.MOV.U32 R13, RZ, RZ, RZ ; /* 0x000000ffff0d7224 */ /* 0x000fe200078e00ff */ /*0140*/ UMOV UR5, URZ ; /* 0x0000003f00057c82 */ /* 
0x000fe20008000000 */ /*0150*/ ISETP.GT.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */ /* 0x000fe20003f04270 */ /*0160*/ UMOV UR6, URZ ; /* 0x0000003f00067c82 */ /* 0x000fd80008000000 */ /*0170*/ @!P0 BRA 0xbb0 ; /* 0x00000a3000008947 */ /* 0x000fea0003800000 */ /*0180*/ ISETP.GT.AND P1, PT, R6, 0xc, PT ; /* 0x0000000c0600780c */ /* 0x000fe40003f24270 */ /*0190*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */ /* 0x000fd60003f0f070 */ /*01a0*/ @!P1 BRA 0x820 ; /* 0x0000067000009947 */ /* 0x000fea0003800000 */ /*01b0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*01c0*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x004ea4000c1e1b00 */ /*01d0*/ IADD3 R10, P1, R8, UR5, RZ ; /* 0x00000005080a7c10 */ /* 0x004fc8000ff3e0ff */ /*01e0*/ IADD3.X R11, R9, UR6, RZ, P1, !PT ; /* 0x00000006090b7c10 */ /* 0x000fca0008ffe4ff */ /*01f0*/ LDG.E R10, [R10.64] ; /* 0x000000080a0a7981 */ /* 0x000ea4000c1e1900 */ /*0200*/ FADD R7, R10, R13 ; /* 0x0000000d0a077221 */ /* 0x004fca0000000000 */ /*0210*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x0003e8000c101908 */ /*0220*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ea4000c1e1b00 */ /*0230*/ IADD3 R12, P1, R8, UR5, RZ ; /* 0x00000005080c7c10 */ /* 0x004fc8000ff3e0ff */ /*0240*/ IADD3.X R13, R9, UR6, RZ, P1, !PT ; /* 0x00000006090d7c10 */ /* 0x000fca0008ffe4ff */ /*0250*/ LDG.E R12, [R12.64+0x4] ; /* 0x000004080c0c7981 */ /* 0x000ea4000c1e1900 */ /*0260*/ FADD R17, R7, R12 ; /* 0x0000000c07117221 */ /* 0x004fca0000000000 */ /*0270*/ STG.E [R2.64], R17 ; /* 0x0000001102007986 */ /* 0x0005e8000c101908 */ /*0280*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*0290*/ IADD3 R14, P1, R8, UR5, RZ ; /* 0x00000005080e7c10 */ /* 0x008fc8000ff3e0ff */ /*02a0*/ IADD3.X R15, R9, UR6, RZ, P1, !PT ; /* 0x00000006090f7c10 */ /* 0x000fca0008ffe4ff */ /*02b0*/ LDG.E R14, [R14.64+0x8] ; /* 0x000008080e0e7981 */ /* 0x000e64000c1e1900 */ 
/*02c0*/ FADD R7, R17, R14 ; /* 0x0000000e11077221 */ /* 0x002fca0000000000 */ /*02d0*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x0003e8000c101908 */ /*02e0*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*02f0*/ IADD3 R10, P1, R8, UR5, RZ ; /* 0x00000005080a7c10 */ /* 0x008fc8000ff3e0ff */ /*0300*/ IADD3.X R11, R9, UR6, RZ, P1, !PT ; /* 0x00000006090b7c10 */ /* 0x000fca0008ffe4ff */ /*0310*/ LDG.E R10, [R10.64+0xc] ; /* 0x00000c080a0a7981 */ /* 0x000ea4000c1e1900 */ /*0320*/ FADD R17, R7, R10 ; /* 0x0000000a07117221 */ /* 0x004fca0000000000 */ /*0330*/ STG.E [R2.64], R17 ; /* 0x0000001102007986 */ /* 0x0005e8000c101908 */ /*0340*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*0350*/ IADD3 R12, P1, R8, UR5, RZ ; /* 0x00000005080c7c10 */ /* 0x008fc8000ff3e0ff */ /*0360*/ IADD3.X R13, R9, UR6, RZ, P1, !PT ; /* 0x00000006090d7c10 */ /* 0x000fca0008ffe4ff */ /*0370*/ LDG.E R12, [R12.64+0x10] ; /* 0x000010080c0c7981 */ /* 0x000e64000c1e1900 */ /*0380*/ FADD R7, R17, R12 ; /* 0x0000000c11077221 */ /* 0x002fca0000000000 */ /*0390*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x0003e8000c101908 */ /*03a0*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*03b0*/ IADD3 R14, P1, R8, UR5, RZ ; /* 0x00000005080e7c10 */ /* 0x008fc8000ff3e0ff */ /*03c0*/ IADD3.X R15, R9, UR6, RZ, P1, !PT ; /* 0x00000006090f7c10 */ /* 0x000fca0008ffe4ff */ /*03d0*/ LDG.E R14, [R14.64+0x14] ; /* 0x000014080e0e7981 */ /* 0x000ea4000c1e1900 */ /*03e0*/ FADD R17, R7, R14 ; /* 0x0000000e07117221 */ /* 0x004fca0000000000 */ /*03f0*/ STG.E [R2.64], R17 ; /* 0x0000001102007986 */ /* 0x0005e8000c101908 */ /*0400*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*0410*/ IADD3 R10, P1, R8, UR5, RZ ; /* 0x00000005080a7c10 */ /* 0x008fc8000ff3e0ff */ /*0420*/ IADD3.X R11, R9, UR6, RZ, P1, !PT ; /* 0x00000006090b7c10 */ /* 0x000fca0008ffe4ff */ /*0430*/ LDG.E R10, [R10.64+0x18] ; /* 
0x000018080a0a7981 */ /* 0x000e64000c1e1900 */ /*0440*/ FADD R7, R17, R10 ; /* 0x0000000a11077221 */ /* 0x002fca0000000000 */ /*0450*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x0003e8000c101908 */ /*0460*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*0470*/ IADD3 R12, P1, R8, UR5, RZ ; /* 0x00000005080c7c10 */ /* 0x008fc8000ff3e0ff */ /*0480*/ IADD3.X R13, R9, UR6, RZ, P1, !PT ; /* 0x00000006090d7c10 */ /* 0x000fca0008ffe4ff */ /*0490*/ LDG.E R12, [R12.64+0x1c] ; /* 0x00001c080c0c7981 */ /* 0x000ea4000c1e1900 */ /*04a0*/ FADD R17, R7, R12 ; /* 0x0000000c07117221 */ /* 0x004fca0000000000 */ /*04b0*/ STG.E [R2.64], R17 ; /* 0x0000001102007986 */ /* 0x0005e8000c101908 */ /*04c0*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*04d0*/ IADD3 R14, P1, R8, UR5, RZ ; /* 0x00000005080e7c10 */ /* 0x008fc8000ff3e0ff */ /*04e0*/ IADD3.X R15, R9, UR6, RZ, P1, !PT ; /* 0x00000006090f7c10 */ /* 0x000fca0008ffe4ff */ /*04f0*/ LDG.E R14, [R14.64+0x20] ; /* 0x000020080e0e7981 */ /* 0x000e64000c1e1900 */ /*0500*/ FADD R7, R17, R14 ; /* 0x0000000e11077221 */ /* 0x002fca0000000000 */ /*0510*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x0003e8000c101908 */ /*0520*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*0530*/ IADD3 R10, P1, R8, UR5, RZ ; /* 0x00000005080a7c10 */ /* 0x008fc8000ff3e0ff */ /*0540*/ IADD3.X R11, R9, UR6, RZ, P1, !PT ; /* 0x00000006090b7c10 */ /* 0x000fca0008ffe4ff */ /*0550*/ LDG.E R10, [R10.64+0x24] ; /* 0x000024080a0a7981 */ /* 0x000ea4000c1e1900 */ /*0560*/ FADD R17, R7, R10 ; /* 0x0000000a07117221 */ /* 0x004fca0000000000 */ /*0570*/ STG.E [R2.64], R17 ; /* 0x0000001102007986 */ /* 0x0005e8000c101908 */ /*0580*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*0590*/ IADD3 R12, P1, R8, UR5, RZ ; /* 0x00000005080c7c10 */ /* 0x008fc8000ff3e0ff */ /*05a0*/ IADD3.X R13, R9, UR6, RZ, P1, !PT ; /* 0x00000006090d7c10 */ /* 0x000fca0008ffe4ff */ 
/*05b0*/ LDG.E R12, [R12.64+0x28] ; /* 0x000028080c0c7981 */ /* 0x000e64000c1e1900 */ /*05c0*/ FADD R7, R17, R12 ; /* 0x0000000c11077221 */ /* 0x002fca0000000000 */ /*05d0*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x0003e8000c101908 */ /*05e0*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*05f0*/ IADD3 R14, P1, R8, UR5, RZ ; /* 0x00000005080e7c10 */ /* 0x008fc8000ff3e0ff */ /*0600*/ IADD3.X R15, R9, UR6, RZ, P1, !PT ; /* 0x00000006090f7c10 */ /* 0x000fca0008ffe4ff */ /*0610*/ LDG.E R14, [R14.64+0x2c] ; /* 0x00002c080e0e7981 */ /* 0x000ea4000c1e1900 */ /*0620*/ FADD R17, R7, R14 ; /* 0x0000000e07117221 */ /* 0x004fca0000000000 */ /*0630*/ STG.E [R2.64], R17 ; /* 0x0000001102007986 */ /* 0x0005e8000c101908 */ /*0640*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*0650*/ IADD3 R10, P1, R8, UR5, RZ ; /* 0x00000005080a7c10 */ /* 0x008fc8000ff3e0ff */ /*0660*/ IADD3.X R11, R9, UR6, RZ, P1, !PT ; /* 0x00000006090b7c10 */ /* 0x000fca0008ffe4ff */ /*0670*/ LDG.E R10, [R10.64+0x30] ; /* 0x000030080a0a7981 */ /* 0x000e64000c1e1900 */ /*0680*/ FADD R7, R17, R10 ; /* 0x0000000a11077221 */ /* 0x002fca0000000000 */ /*0690*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x0003e8000c101908 */ /*06a0*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*06b0*/ IADD3 R12, P1, R8, UR5, RZ ; /* 0x00000005080c7c10 */ /* 0x008fc8000ff3e0ff */ /*06c0*/ IADD3.X R13, R9, UR6, RZ, P1, !PT ; /* 0x00000006090d7c10 */ /* 0x000fca0008ffe4ff */ /*06d0*/ LDG.E R12, [R12.64+0x34] ; /* 0x000034080c0c7981 */ /* 0x000ea4000c1e1900 */ /*06e0*/ FADD R17, R7, R12 ; /* 0x0000000c07117221 */ /* 0x004fca0000000000 */ /*06f0*/ STG.E [R2.64], R17 ; /* 0x0000001102007986 */ /* 0x0005e8000c101908 */ /*0700*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*0710*/ IADD3 R14, P1, R8, UR5, RZ ; /* 0x00000005080e7c10 */ /* 0x008fc8000ff3e0ff */ /*0720*/ IADD3.X R15, R9, UR6, RZ, P1, !PT ; /* 
0x00000006090f7c10 */ /* 0x000fca0008ffe4ff */ /*0730*/ LDG.E R14, [R14.64+0x38] ; /* 0x000038080e0e7981 */ /* 0x000e64000c1e1900 */ /*0740*/ FADD R7, R17, R14 ; /* 0x0000000e11077221 */ /* 0x002fca0000000000 */ /*0750*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x0005e8000c101908 */ /*0760*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee2000c1e1b00 */ /*0770*/ IADD3 R6, R6, -0x10, RZ ; /* 0xfffffff006067810 */ /* 0x000fe40007ffe0ff */ /*0780*/ IADD3 R8, P1, R8, UR5, RZ ; /* 0x0000000508087c10 */ /* 0x008fc8000ff3e0ff */ /*0790*/ IADD3.X R9, R9, UR6, RZ, P1, !PT ; /* 0x0000000609097c10 */ /* 0x000fca0008ffe4ff */ /*07a0*/ LDG.E R8, [R8.64+0x3c] ; /* 0x00003c0808087981 */ /* 0x000ee2000c1e1900 */ /*07b0*/ ISETP.GT.AND P1, PT, R6, 0xc, PT ; /* 0x0000000c0600780c */ /* 0x000fe20003f24270 */ /*07c0*/ UIADD3 UR5, UP0, UR5, 0x40, URZ ; /* 0x0000004005057890 */ /* 0x000fe4000ff1e03f */ /*07d0*/ UIADD3 UR4, UR4, 0x10, URZ ; /* 0x0000001004047890 */ /* 0x000fe4000fffe03f */ /*07e0*/ UIADD3.X UR6, URZ, UR6, URZ, UP0, !UPT ; /* 0x000000063f067290 */ /* 0x000fe200087fe43f */ /*07f0*/ FADD R13, R7, R8 ; /* 0x00000008070d7221 */ /* 0x008fca0000000000 */ /*0800*/ STG.E [R2.64], R13 ; /* 0x0000000d02007986 */ /* 0x0005e4000c101908 */ /*0810*/ @P1 BRA 0x1c0 ; /* 0xfffff9a000001947 */ /* 0x000fea000383ffff */ /*0820*/ ISETP.GT.AND P1, PT, R6, 0x4, PT ; /* 0x000000040600780c */ /* 0x000fda0003f24270 */ /*0830*/ @!P1 BRA 0xb90 ; /* 0x0000035000009947 */ /* 0x000fea0003800000 */ /*0840*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*0850*/ IADD3 R10, P0, R8, UR5, RZ ; /* 0x00000005080a7c10 */ /* 0x008fc8000ff1e0ff */ /*0860*/ IADD3.X R11, R9, UR6, RZ, P0, !PT ; /* 0x00000006090b7c10 */ /* 0x000fca00087fe4ff */ /*0870*/ LDG.E R10, [R10.64] ; /* 0x000000080a0a7981 */ /* 0x000ee4000c1e1900 */ /*0880*/ FADD R7, R13, R10 ; /* 0x0000000a0d077221 */ /* 0x00cfca0000000000 */ /*0890*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 
0x0003e8000c101908 */ /*08a0*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ea4000c1e1b00 */ /*08b0*/ IADD3 R12, P0, R8, UR5, RZ ; /* 0x00000005080c7c10 */ /* 0x004fc8000ff1e0ff */ /*08c0*/ IADD3.X R13, R9, UR6, RZ, P0, !PT ; /* 0x00000006090d7c10 */ /* 0x000fca00087fe4ff */ /*08d0*/ LDG.E R12, [R12.64+0x4] ; /* 0x000004080c0c7981 */ /* 0x000ea4000c1e1900 */ /*08e0*/ FADD R17, R7, R12 ; /* 0x0000000c07117221 */ /* 0x004fca0000000000 */ /*08f0*/ STG.E [R2.64], R17 ; /* 0x0000001102007986 */ /* 0x0005e8000c101908 */ /*0900*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*0910*/ IADD3 R14, P0, R8, UR5, RZ ; /* 0x00000005080e7c10 */ /* 0x008fc8000ff1e0ff */ /*0920*/ IADD3.X R15, R9, UR6, RZ, P0, !PT ; /* 0x00000006090f7c10 */ /* 0x000fca00087fe4ff */ /*0930*/ LDG.E R14, [R14.64+0x8] ; /* 0x000008080e0e7981 */ /* 0x000e64000c1e1900 */ /*0940*/ FADD R7, R17, R14 ; /* 0x0000000e11077221 */ /* 0x002fca0000000000 */ /*0950*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x0003e8000c101908 */ /*0960*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*0970*/ IADD3 R10, P0, R8, UR5, RZ ; /* 0x00000005080a7c10 */ /* 0x008fc8000ff1e0ff */ /*0980*/ IADD3.X R11, R9, UR6, RZ, P0, !PT ; /* 0x00000006090b7c10 */ /* 0x000fca00087fe4ff */ /*0990*/ LDG.E R10, [R10.64+0xc] ; /* 0x00000c080a0a7981 */ /* 0x000ea4000c1e1900 */ /*09a0*/ FADD R17, R7, R10 ; /* 0x0000000a07117221 */ /* 0x004fca0000000000 */ /*09b0*/ STG.E [R2.64], R17 ; /* 0x0000001102007986 */ /* 0x0005e8000c101908 */ /*09c0*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*09d0*/ IADD3 R12, P0, R8, UR5, RZ ; /* 0x00000005080c7c10 */ /* 0x008fc8000ff1e0ff */ /*09e0*/ IADD3.X R13, R9, UR6, RZ, P0, !PT ; /* 0x00000006090d7c10 */ /* 0x000fca00087fe4ff */ /*09f0*/ LDG.E R12, [R12.64+0x10] ; /* 0x000010080c0c7981 */ /* 0x000e64000c1e1900 */ /*0a00*/ FADD R7, R17, R12 ; /* 0x0000000c11077221 */ /* 0x002fca0000000000 */ /*0a10*/ STG.E 
[R2.64], R7 ; /* 0x0000000702007986 */ /* 0x0003e8000c101908 */ /*0a20*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*0a30*/ IADD3 R14, P0, R8, UR5, RZ ; /* 0x00000005080e7c10 */ /* 0x008fc8000ff1e0ff */ /*0a40*/ IADD3.X R15, R9, UR6, RZ, P0, !PT ; /* 0x00000006090f7c10 */ /* 0x000fca00087fe4ff */ /*0a50*/ LDG.E R14, [R14.64+0x14] ; /* 0x000014080e0e7981 */ /* 0x000ea4000c1e1900 */ /*0a60*/ FADD R17, R7, R14 ; /* 0x0000000e07117221 */ /* 0x004fca0000000000 */ /*0a70*/ STG.E [R2.64], R17 ; /* 0x0000001102007986 */ /* 0x0005e8000c101908 */ /*0a80*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*0a90*/ IADD3 R10, P0, R8, UR5, RZ ; /* 0x00000005080a7c10 */ /* 0x008fc8000ff1e0ff */ /*0aa0*/ IADD3.X R11, R9, UR6, RZ, P0, !PT ; /* 0x00000006090b7c10 */ /* 0x000fca00087fe4ff */ /*0ab0*/ LDG.E R10, [R10.64+0x18] ; /* 0x000018080a0a7981 */ /* 0x000e64000c1e1900 */ /*0ac0*/ FADD R7, R17, R10 ; /* 0x0000000a11077221 */ /* 0x002fca0000000000 */ /*0ad0*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x0005e8000c101908 */ /*0ae0*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*0af0*/ IADD3 R8, P0, R8, UR5, RZ ; /* 0x0000000508087c10 */ /* 0x008fc8000ff1e0ff */ /*0b00*/ IADD3.X R9, R9, UR6, RZ, P0, !PT ; /* 0x0000000609097c10 */ /* 0x000fca00087fe4ff */ /*0b10*/ LDG.E R8, [R8.64+0x1c] ; /* 0x00001c0808087981 */ /* 0x000ee2000c1e1900 */ /*0b20*/ UIADD3 UR5, UP0, UR5, 0x20, URZ ; /* 0x0000002005057890 */ /* 0x000fe2000ff1e03f */ /*0b30*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe20003f0e170 */ /*0b40*/ UIADD3 UR4, UR4, 0x8, URZ ; /* 0x0000000804047890 */ /* 0x000fe2000fffe03f */ /*0b50*/ IADD3 R6, R6, -0x8, RZ ; /* 0xfffffff806067810 */ /* 0x000fe20007ffe0ff */ /*0b60*/ UIADD3.X UR6, URZ, UR6, URZ, UP0, !UPT ; /* 0x000000063f067290 */ /* 0x000fe200087fe43f */ /*0b70*/ FADD R13, R7, R8 ; /* 0x00000008070d7221 */ /* 0x008fca0000000000 */ /*0b80*/ STG.E [R2.64], R13 
; /* 0x0000000d02007986 */ /* 0x0005e8000c101908 */ /*0b90*/ ISETP.NE.OR P0, PT, R6, RZ, P0 ; /* 0x000000ff0600720c */ /* 0x000fda0000705670 */ /*0ba0*/ @!P0 BRA 0xd90 ; /* 0x000001e000008947 */ /* 0x000fea0003800000 */ /*0bb0*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*0bc0*/ IADD3 R10, P0, R8, UR5, RZ ; /* 0x00000005080a7c10 */ /* 0x008fc8000ff1e0ff */ /*0bd0*/ IADD3.X R11, R9, UR6, RZ, P0, !PT ; /* 0x00000006090b7c10 */ /* 0x000fca00087fe4ff */ /*0be0*/ LDG.E R10, [R10.64] ; /* 0x000000080a0a7981 */ /* 0x000ee4000c1e1900 */ /*0bf0*/ FADD R7, R10, R13 ; /* 0x0000000d0a077221 */ /* 0x00cfca0000000000 */ /*0c00*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x0003e8000c101908 */ /*0c10*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ea4000c1e1b00 */ /*0c20*/ IADD3 R12, P0, R8, UR5, RZ ; /* 0x00000005080c7c10 */ /* 0x004fc8000ff1e0ff */ /*0c30*/ IADD3.X R13, R9, UR6, RZ, P0, !PT ; /* 0x00000006090d7c10 */ /* 0x000fca00087fe4ff */ /*0c40*/ LDG.E R12, [R12.64+0x4] ; /* 0x000004080c0c7981 */ /* 0x000ea4000c1e1900 */ /*0c50*/ FADD R17, R7, R12 ; /* 0x0000000c07117221 */ /* 0x004fca0000000000 */ /*0c60*/ STG.E [R2.64], R17 ; /* 0x0000001102007986 */ /* 0x0005e8000c101908 */ /*0c70*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee4000c1e1b00 */ /*0c80*/ IADD3 R14, P0, R8, UR5, RZ ; /* 0x00000005080e7c10 */ /* 0x008fc8000ff1e0ff */ /*0c90*/ IADD3.X R15, R9, UR6, RZ, P0, !PT ; /* 0x00000006090f7c10 */ /* 0x000fca00087fe4ff */ /*0ca0*/ LDG.E R14, [R14.64+0x8] ; /* 0x000008080e0e7981 */ /* 0x000e64000c1e1900 */ /*0cb0*/ FADD R7, R17, R14 ; /* 0x0000000e11077221 */ /* 0x002fca0000000000 */ /*0cc0*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x0005e8000c101908 */ /*0cd0*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000804087981 */ /* 0x000ee2000c1e1b00 */ /*0ce0*/ IADD3 R6, R6, -0x4, RZ ; /* 0xfffffffc06067810 */ /* 0x000fe40007ffe0ff */ /*0cf0*/ IADD3 R8, P0, R8, UR5, RZ ; /* 0x0000000508087c10 */ /* 0x008fc8000ff1e0ff */ 
/*0d00*/ IADD3.X R9, R9, UR6, RZ, P0, !PT ; /* 0x0000000609097c10 */ /* 0x000fca00087fe4ff */ /*0d10*/ LDG.E R8, [R8.64+0xc] ; /* 0x00000c0808087981 */ /* 0x000ee2000c1e1900 */ /*0d20*/ ISETP.NE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */ /* 0x000fe20003f05270 */ /*0d30*/ UIADD3 UR5, UP0, UR5, 0x10, URZ ; /* 0x0000001005057890 */ /* 0x000fe4000ff1e03f */ /*0d40*/ UIADD3 UR4, UR4, 0x4, URZ ; /* 0x0000000404047890 */ /* 0x000fe4000fffe03f */ /*0d50*/ UIADD3.X UR6, URZ, UR6, URZ, UP0, !UPT ; /* 0x000000063f067290 */ /* 0x000fe200087fe43f */ /*0d60*/ FADD R13, R7, R8 ; /* 0x00000008070d7221 */ /* 0x008fca0000000000 */ /*0d70*/ STG.E [R2.64], R13 ; /* 0x0000000d02007986 */ /* 0x0005e4000c101908 */ /*0d80*/ @P0 BRA 0xbb0 ; /* 0xfffffe2000000947 */ /* 0x005fea000383ffff */ /*0d90*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */ /* 0x000fda0003f05270 */ /*0da0*/ @!P0 BRA 0xe70 ; /* 0x000000c000008947 */ /* 0x000fea0003800000 */ /*0db0*/ UIMAD.WIDE UR4, UR4, 0x4, URZ ; /* 0x00000004040478a5 */ /* 0x000fc6000f8e023f */ /*0dc0*/ LDG.E.64 R6, [R4.64] ; /* 0x0000000804067981 */ /* 0x004ea2000c1e1b00 */ /*0dd0*/ IADD3 R0, R0, -0x1, RZ ; /* 0xffffffff00007810 */ /* 0x000fe40007ffe0ff */ /*0de0*/ IADD3 R6, P0, R6, UR4, RZ ; /* 0x0000000406067c10 */ /* 0x004fc8000ff1e0ff */ /*0df0*/ IADD3.X R7, R7, UR5, RZ, P0, !PT ; /* 0x0000000507077c10 */ /* 0x000fca00087fe4ff */ /*0e00*/ LDG.E R6, [R6.64] ; /* 0x0000000806067981 */ /* 0x000ea2000c1e1900 */ /*0e10*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */ /* 0x000fe20003f05270 */ /*0e20*/ UIADD3 UR4, UP0, UR4, 0x4, URZ ; /* 0x0000000404047890 */ /* 0x000fc8000ff1e03f */ /*0e30*/ UIADD3.X UR5, URZ, UR5, URZ, UP0, !UPT ; /* 0x000000053f057290 */ /* 0x000fe200087fe43f */ /*0e40*/ FADD R13, R6, R13 ; /* 0x0000000d060d7221 */ /* 0x006fca0000000000 */ /*0e50*/ STG.E [R2.64], R13 ; /* 0x0000000d02007986 */ /* 0x0003e4000c101908 */ /*0e60*/ @P0 BRA 0xdc0 ; /* 0xffffff5000000947 */ /* 0x000fea000383ffff */ /*0e70*/ 
BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*0e80*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0e90*/ BRA 0xe90; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0ea0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0eb0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0ec0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0ed0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0ee0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0ef0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0f00*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0f10*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0f20*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0f30*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0f40*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0f50*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0f60*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0f70*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected sampleKernel .globl sampleKernel .p2align 8 .type sampleKernel,@function sampleKernel: s_clause 0x1 s_load_b32 s4, s[0:1], 0x8 s_load_b64 s[2:3], s[0:1], 0x10 v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v1, 2, v0 s_waitcnt lgkmcnt(0) s_cmp_lt_i32 s4, 1 global_store_b32 v1, v2, s[2:3] s_cbranch_scc1 .LBB0_3 s_load_b64 s[0:1], s[0:1], 0x0 v_lshlrev_b32_e32 v2, 3, v0 v_add_co_u32 v0, s2, s2, v1 s_delay_alu instid0(VALU_DEP_1) v_add_co_ci_u32_e64 v1, null, s3, 0, s2 s_waitcnt lgkmcnt(0) global_load_b64 v[2:3], v2, s[0:1] global_load_b32 v4, v[0:1], off .LBB0_2: s_waitcnt vmcnt(1) flat_load_b32 v5, v[2:3] v_add_co_u32 v2, vcc_lo, v2, 4 v_add_co_ci_u32_e32 v3, vcc_lo, 0, v3, vcc_lo s_add_i32 s4, s4, -1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u32 s4, 0 s_waitcnt vmcnt(0) lgkmcnt(0) v_add_f32_e32 v4, v5, v4 global_store_b32 v[0:1], v4, off s_cbranch_scc0 .LBB0_2 .LBB0_3: s_waitcnt_vscnt null, 0x0 s_barrier buffer_gl0_inv s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel sampleKernel .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 24 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 5 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 
0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size sampleKernel, .Lfunc_end0-sampleKernel .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 24 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: sampleKernel .private_segment_fixed_size: 0 .sgpr_count: 7 .sgpr_spill_count: 0 .symbol: sampleKernel.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_000d5526_00000000-6_JCudaSampleKernel.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z36__device_stub__Z12sampleKernelPPfiS_PPfiS_ .type _Z36__device_stub__Z12sampleKernelPPfiS_PPfiS_, @function _Z36__device_stub__Z12sampleKernelPPfiS_PPfiS_: .LFB2051: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movl %esi, 20(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 20(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 120(%rsp), %rax subq %fs:40, %rax jne .L8 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq sampleKernel(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z36__device_stub__Z12sampleKernelPPfiS_PPfiS_, .-_Z36__device_stub__Z12sampleKernelPPfiS_PPfiS_ .globl sampleKernel .type sampleKernel, @function sampleKernel: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z36__device_stub__Z12sampleKernelPPfiS_PPfiS_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc 
.LFE2052: .size sampleKernel, .-sampleKernel .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "sampleKernel" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq sampleKernel(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "JCudaSampleKernel.hip" .globl __device_stub__sampleKernel # -- Begin function __device_stub__sampleKernel .p2align 4, 0x90 .type __device_stub__sampleKernel,@function __device_stub__sampleKernel: # @__device_stub__sampleKernel .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movl %esi, 12(%rsp) movq %rdx, 64(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 12(%rsp), %rax movq %rax, 88(%rsp) leaq 64(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $sampleKernel, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size __device_stub__sampleKernel, .Lfunc_end0-__device_stub__sampleKernel .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $sampleKernel, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: 
pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type sampleKernel,@object # @sampleKernel .section .rodata,"a",@progbits .globl sampleKernel .p2align 3, 0x0 sampleKernel: .quad __device_stub__sampleKernel .size sampleKernel, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "sampleKernel" .size .L__unnamed_1, 13 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __device_stub__sampleKernel .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym sampleKernel .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <unistd.h>
#include <stdio.h>
#include "cuda.h"
#include <sys/time.h>

#define threshold 1e-2
#define n (4096)
#define m (3)

void init(void);
void ref(void);

#define TILE_SIZE 4
#define KERNEL_SIZE 3
#define KS_DIV_2 (KERNEL_SIZE >> 1)
/* Each block carries the output tile plus the convolution halo: every thread
   loads one input element; only the inner TILE_SIZE x TILE_SIZE threads compute. */
#define BLOCK_SIZE (TILE_SIZE + KERNEL_SIZE - 1)

/* Convolution mask, broadcast-read from constant memory (set via cudaMemcpyToSymbol). */
__constant__ double Mc[KERNEL_SIZE*KERNEL_SIZE];

void compare(int N, double *wref, double *w);

/* Tiled 2D convolution of N (inp_size x inp_size) with mask Mc, result in P.
   Expects grid = (inp_size/TILE_SIZE)^2 blocks of BLOCK_SIZE x BLOCK_SIZE threads.
   Out-of-matrix input elements are treated as zero padding. */
__global__ void ConvolutionKernel(double* N, double* P, int inp_size){
    /* BUGFIX: the tile must include the halo. The original TILE_SIZE x TILE_SIZE
       tile was read at tileNs[y+ty][x+tx] with indices up to
       TILE_SIZE+KERNEL_SIZE-2, i.e. out of bounds in shared memory. */
    __shared__ float tileNs[BLOCK_SIZE][BLOCK_SIZE];

    // thread indices within the block
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    // output indices: blocks advance by TILE_SIZE outputs, not by blockDim
    int row_o = ty + blockIdx.y * TILE_SIZE;
    int col_o = tx + blockIdx.x * TILE_SIZE;

    // shift up/left by the mask radius to get the input element this thread loads
    int row_i = row_o - KS_DIV_2;
    int col_i = col_o - KS_DIV_2;

    // Load one tile element (zero outside the matrix)
    if(row_i >= 0 && row_i < inp_size && col_i >= 0 && col_i < inp_size)
        tileNs[ty][tx] = N[row_i*inp_size + col_i];
    else
        tileNs[ty][tx] = 0.0f;

    // Wait until all tile elements are loaded (barrier is outside divergent code)
    __syncthreads();

    // only the first TILE_SIZE x TILE_SIZE threads of the block produce output
    if(tx < TILE_SIZE && ty < TILE_SIZE){
        float pValue = 0.0f;
        for(int y=0; y<KERNEL_SIZE; y++)
            for(int x=0; x<KERNEL_SIZE; x++){
                pValue += Mc[y*KERNEL_SIZE + x] * tileNs[y+ty][x+tx];
            }
        // only write values if you are inside matrix bounds
        if(row_o < inp_size && col_o < inp_size)
            P[row_o*inp_size + col_o] = pValue;
    }
}

double rtclock(void);

double a[n*n],b[m*m],c[n*n],cref[n*n];

int main(){
    cudaDeviceProp dev_prop;
    cudaGetDeviceProperties(&dev_prop,0);
    printf("dev_prop.totalConstMem = %lu\n",dev_prop.totalConstMem);

    double clkbegin, clkend, t;
    double *Nd,*Md,*Pd;

    /* BUGFIX: blocks need BLOCK_SIZE^2 threads (output tile + halo); the grid
       still advances by TILE_SIZE output elements per block. */
    dim3 blkDim(BLOCK_SIZE, BLOCK_SIZE);
    dim3 grdDim(n/TILE_SIZE, n/TILE_SIZE);
    int size_input, size_mask;

    printf("Input Size = %dx%d\n",n,n);
    printf("Mask size = %dx%d\n",m,m);

    init();

    // sequential reference + timing
    clkbegin = rtclock();
    ref();
    clkend = rtclock();
    t = clkend-clkbegin;
    printf("Seq: Approx GFLOPS: %.6f ; Time = %.6f sec; \n", n*n*m*m/t/1e9,t);

    size_input = sizeof(double)*n*n;
    size_mask = sizeof(double)*m*m;

    cudaMalloc((void **) &Nd,size_input);
    cudaMalloc((void **) &Md,size_mask);
    cudaMalloc((void **) &Pd,size_input);

    cudaMemcpyToSymbol(Mc, b, size_mask);
    cudaMemcpy(Nd,a,size_input,cudaMemcpyHostToDevice);
    cudaMemcpy(Md,b,size_mask,cudaMemcpyHostToDevice);

    clkbegin = rtclock();
    //conv1d_basic<<<grid, threads>>>(Nd,Md,Pd,m,n);
    /* BUGFIX: launch syntax is <<<grid, block>>>; the original passed the two
       swapped, requesting 4x4 blocks of 1024x1024 threads (invalid launch). */
    ConvolutionKernel<<< grdDim , blkDim >>>(Nd,Pd,n);
    if (cudaDeviceSynchronize() != cudaSuccess)
        printf ("Error return for test_kernel\n");
    else{
        clkend = rtclock();
        t = clkend-clkbegin;
        cudaMemcpy(c,Pd,size_input,cudaMemcpyDeviceToHost);
        cudaFree(Nd);
        cudaFree(Md);
        cudaFree(Pd);
        printf("GPU: Approx GFLOPS: %.6f ; Time = %.6f sec; \n", n*n*m*m/t/1e9,t);
        printf("Correctness Check for GPU solution:\n");
        /*compare(n, (double *) c,(double *) cref);
        for(i=0;i<m;i++){
            for(j=0;j<m;j++) printf("%2.0lf ",b[i*m+j]);
            printf("\n");
        }
        printf("\n\n");
        for(i=0;i<n;i++){
            for(j=0;j<n;j++) printf("%2.0lf ",a[i*n+j]);
            printf("\n");
        }
        for(i=0;i<n;i++){
            for(j=0;j<n;j++) printf("%3.0lf ",c[i*n+j]);
            printf("\n");
        } */
        printf("Correct!\n");
    }
}

/* CPU reference convolution into cref, zero-padded at the matrix borders. */
void ref(void){
    int i,j,k,l,x,y;
    for(i=0;i<n;i++)
        for(j=0;j<n;j++){
            k = i-m/2;
            l = j-m/2;
            for(x=0;x<m;x++)
                for(y=0;y<m;y++)
                    /* BUGFIX: input indices are bounded by the matrix size n, not
                       the mask size m, and the mask element is b[x*m + y]; the
                       original read b[x*m + m], one past the row end. */
                    if((k+x >= 0 && k+x < n) && (l+y >= 0 && l+y < n))
                        cref[i*n+j] += a[(k+x)*n + (l+y)]*b[x*m + y];
        }
}

/* Deterministic fill: a[i][j] = i+j, b[i][j] = i+j. */
void init(void){
    int i,j;
    for(i=0;i<n;i++)
        for(j=0;j<n;j++)
            a[i*n+j] = i+j; //drand48()
    for(i=0;i<m;i++)
        for(j=0;j<m;j++)
            b[i*m+j] = i+j;
}

/* Element-wise compare of wref vs w with absolute tolerance `threshold`;
   reports the number of diffs and the maximum diff found. */
void compare(int N, double *wref, double *w){
    double maxdiff,this_diff;
    int numdiffs;
    int i;
    numdiffs = 0;
    maxdiff = 0;
    for (i=0;i<N;i++) {
        this_diff = wref[i]-w[i];
        if (this_diff < 0) this_diff = -1.0*this_diff;
        if (this_diff>threshold) {
            numdiffs++;
            if (this_diff > maxdiff) maxdiff=this_diff;
        }
    }
    if (numdiffs > 0)
        printf("%d Diffs found over threshold %f; Max Diff = %f\n",
               numdiffs,threshold,maxdiff);
    else
        printf("No differences found between reference and test versions\n");
}

/* Wall-clock time in seconds (gettimeofday). */
double rtclock(void){
    struct timezone Tzp;
    struct timeval Tp;
    int stat;
    stat = gettimeofday (&Tp, &Tzp);
    if (stat != 0) printf("Error return from gettimeofday: %d",stat);
    return(Tp.tv_sec + Tp.tv_usec*1.0e-6);
}

/*
__global__ void test_kernel(int N, double *A, double *B, double *C){
    //int x=threadIdx.y+blockIdx.y*blockDim.y;
    //int y=threadIdx.x+blockIdx.x*blockDim.x;
    double sum;
    sum=0;
    __shared__ double Ads[TILE_WIDTH][TILE_WIDTH];
    __shared__ double Bds[TILE_WIDTH][TILE_WIDTH];
    int bx = blockIdx.x; int by = blockIdx.y;
    int tx = threadIdx.x; int ty = threadIdx.y;
    int row = by*TILE_WIDTH + ty;
    int col = bx*TILE_WIDTH + tx;
    for(int m=0; m<N/TILE_WIDTH; ++m){
        if(row < N && (m*TILE_WIDTH +tx) < N) Ads[ty][tx] = A[row*N + TILE_WIDTH*m + tx];
        else Ads[ty][tx] = 0;
        if(m*TILE_WIDTH + ty < N && col < N) Bds[ty][tx] = B[(m*TILE_WIDTH + ty)*N + col];
        else Bds[ty][tx] = 0;
        __syncthreads();
        for(int k=0; k<TILE_WIDTH; ++k) sum += Ads[ty][k]*Bds[k][tx];
        __syncthreads();
    }
    if(row < N && col < N) C[row*N + col] = sum;
    /* if((x<N)&&(y<N))
    for (int k=0;k<N;k+=4){
        sum += A[x*N+k]*B[y*N+k];
        sum += A[x*N+k+1]*B[y*N+k+1];
        sum += A[x*N+k+2]*B[y*N+k+2];
        sum += A[x*N+k+3]*B[y*N+k+3];
    }
    C[x*N+y]=sum;
}*/
code for sm_80 Function : _Z17ConvolutionKernelPdS_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */ /* 0x000e220000002600 */ /*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc60000000a00 */ /*0030*/ S2R R9, SR_TID.Y ; /* 0x0000000000097919 */ /* 0x000e280000002200 */ /*0040*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */ /* 0x000e680000002100 */ /*0050*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */ /* 0x000e620000002500 */ /*0060*/ IMAD R0, R0, 0x4, R9 ; /* 0x0000000400007824 */ /* 0x001fca00078e0209 */ /*0070*/ ISETP.GT.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */ /* 0x000fe20003f04270 */ /*0080*/ IMAD R3, R3, 0x4, R6 ; /* 0x0000000403037824 */ /* 0x002fc600078e0206 */ /*0090*/ ISETP.GT.AND P0, PT, R0, RZ, !P0 ; /* 0x000000ff0000720c */ /* 0x000fc80004704270 */ /*00a0*/ ISETP.GT.AND P0, PT, R3, RZ, P0 ; /* 0x000000ff0300720c */ /* 0x000fc80000704270 */ /*00b0*/ ISETP.LE.AND P0, PT, R3, c[0x0][0x170], P0 ; /* 0x00005c0003007a0c */ /* 0x000fda0000703270 */ /*00c0*/ @P0 IADD3 R2, R0, -0x1, RZ ; /* 0xffffffff00020810 */ /* 0x000fe20007ffe0ff */ /*00d0*/ @P0 IMAD.MOV.U32 R5, RZ, RZ, 0x8 ; /* 0x00000008ff050424 */ /* 0x000fc800078e00ff */ /*00e0*/ @P0 IMAD R2, R2, c[0x0][0x170], R3 ; /* 0x00005c0002020a24 */ /* 0x000fca00078e0203 */ /*00f0*/ @P0 IADD3 R2, R2, -0x1, RZ ; /* 0xffffffff02020810 */ /* 0x000fca0007ffe0ff */ /*0100*/ @P0 IMAD.WIDE R4, R2, R5, c[0x0][0x160] ; /* 0x0000580002040625 */ /* 0x000fcc00078e0205 */ /*0110*/ @P0 LDG.E.64 R4, [R4.64] ; /* 0x0000000404040981 */ /* 0x000ea2000c1e1b00 */ /*0120*/ IMAD R2, R9.reuse, 0x4, R6 ; /* 0x0000000409027824 */ /* 0x040fe200078e0206 */ /*0130*/ ISETP.GT.AND P1, PT, R9, 0x3, PT ; /* 0x000000030900780c */ /* 0x000fc80003f24270 */ /*0140*/ @!P0 STS [R2.X4], RZ ; /* 
0x000000ff02008388 */ /* 0x0001e20000004800 */ /*0150*/ ISETP.GT.OR P1, PT, R6, 0x3, P1 ; /* 0x000000030600780c */ /* 0x000fe20000f24670 */ /*0160*/ @P0 F2F.F32.F64 R7, R4 ; /* 0x0000000400070310 */ /* 0x004e640000301000 */ /*0170*/ @P0 STS [R2.X4], R7 ; /* 0x0000000702000388 */ /* 0x0021e80000004800 */ /*0180*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*0190*/ @P1 EXIT ; /* 0x000000000000194d */ /* 0x000fea0003800000 */ /*01a0*/ LDS R12, [R2.X4] ; /* 0x00000000020c7984 */ /* 0x001e220000004800 */ /*01b0*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x170], PT ; /* 0x00005c0003007a0c */ /* 0x000fc60003f06270 */ /*01c0*/ LDS R14, [R2.X4+0x4] ; /* 0x00000400020e7984 */ /* 0x000e620000004800 */ /*01d0*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x170], P0 ; /* 0x00005c0000007a0c */ /* 0x000fc60000706670 */ /*01e0*/ LDS R16, [R2.X4+0x8] ; /* 0x0000080002107984 */ /* 0x000fe80000004800 */ /*01f0*/ LDS R17, [R2.X4+0x10] ; /* 0x0000100002117984 */ /* 0x000ea80000004800 */ /*0200*/ LDS R18, [R2.X4+0x18] ; /* 0x0000180002127984 */ /* 0x000fe20000004800 */ /*0210*/ F2F.F64.F32 R4, R12 ; /* 0x0000000c00047310 */ /* 0x001e300000201800 */ /*0220*/ F2F.F64.F32 R6, R14 ; /* 0x0000000e00067310 */ /* 0x0023e20000201800 */ /*0230*/ DFMA R4, R4, c[0x3][0x0], RZ ; /* 0x00c0000004047a2b */ /* 0x001e2200000000ff */ /*0240*/ LDS R14, [R2.X4+0x14] ; /* 0x00001400020e7984 */ /* 0x002e6c0000004800 */ /*0250*/ F2F.F32.F64 R13, R4 ; /* 0x00000004000d7310 */ /* 0x001e300000301000 */ /*0260*/ F2F.F64.F32 R4, R17 ; /* 0x0000001100047310 */ /* 0x004ff00000201800 */ /*0270*/ F2F.F64.F32 R8, R13 ; /* 0x0000000d00087310 */ /* 0x001e240000201800 */ /*0280*/ DFMA R6, R6, c[0x3][0x8], R8 ; /* 0x00c0020006067a2b */ /* 0x00108c0000000008 */ /*0290*/ F2F.F64.F32 R8, R16 ; /* 0x0000001000087310 */ /* 0x0011f00000201800 */ /*02a0*/ F2F.F32.F64 R15, R6 ; /* 0x00000006000f7310 */ /* 0x004ea20000301000 */ /*02b0*/ LDS R16, [R2.X4+0x20] ; /* 0x0000200002107984 */ /* 0x001e2e0000004800 */ 
/*02c0*/ F2F.F64.F32 R6, R14 ; /* 0x0000000e00067310 */ /* 0x0023f00000201800 */ /*02d0*/ F2F.F64.F32 R10, R15 ; /* 0x0000000f000a7310 */ /* 0x004ea20000201800 */ /*02e0*/ LDS R14, [R2.X4+0x24] ; /* 0x00002400020e7984 */ /* 0x002e620000004800 */ /*02f0*/ DFMA R8, R8, c[0x3][0x10], R10 ; /* 0x00c0040008087a2b */ /* 0x004e8c000000000a */ /*0300*/ F2F.F32.F64 R12, R8 ; /* 0x00000008000c7310 */ /* 0x004eb00000301000 */ /*0310*/ F2F.F64.F32 R8, R18 ; /* 0x0000001200087310 */ /* 0x000ff00000201800 */ /*0320*/ F2F.F64.F32 R10, R12 ; /* 0x0000000c000a7310 */ /* 0x004ea40000201800 */ /*0330*/ DFMA R4, R4, c[0x3][0x18], R10 ; /* 0x00c0060004047a2b */ /* 0x004e8c000000000a */ /*0340*/ F2F.F32.F64 R13, R4 ; /* 0x00000004000d7310 */ /* 0x004eb00000301000 */ /*0350*/ F2F.F64.F32 R4, R16 ; /* 0x0000001000047310 */ /* 0x001ff00000201800 */ /*0360*/ F2F.F64.F32 R10, R13 ; /* 0x0000000d000a7310 */ /* 0x0040a40000201800 */ /*0370*/ LDS R13, [R2.X4+0x28] ; /* 0x00002800020d7984 */ /* 0x001e220000004800 */ /*0380*/ DFMA R6, R6, c[0x3][0x20], R10 ; /* 0x00c0080006067a2b */ /* 0x004e8c000000000a */ /*0390*/ F2F.F32.F64 R15, R6 ; /* 0x00000006000f7310 */ /* 0x004eb00000301000 */ /*03a0*/ F2F.F64.F32 R6, R14 ; /* 0x0000000e00067310 */ /* 0x002ff00000201800 */ /*03b0*/ F2F.F64.F32 R10, R15 ; /* 0x0000000f000a7310 */ /* 0x004e640000201800 */ /*03c0*/ DFMA R8, R8, c[0x3][0x28], R10 ; /* 0x00c00a0008087a2b */ /* 0x002e4c000000000a */ /*03d0*/ F2F.F32.F64 R12, R8 ; /* 0x00000008000c7310 */ /* 0x002e700000301000 */ /*03e0*/ F2F.F64.F32 R8, R13 ; /* 0x0000000d00087310 */ /* 0x001ff00000201800 */ /*03f0*/ F2F.F64.F32 R10, R12 ; /* 0x0000000c000a7310 */ /* 0x002e240000201800 */ /*0400*/ DFMA R4, R4, c[0x3][0x30], R10 ; /* 0x00c00c0004047a2b */ /* 0x001e14000000000a */ /*0410*/ F2F.F32.F64 R4, R4 ; /* 0x0000000400047310 */ /* 0x001e300000301000 */ /*0420*/ F2F.F64.F32 R10, R4 ; /* 0x00000004000a7310 */ /* 0x001e240000201800 */ /*0430*/ DFMA R6, R6, c[0x3][0x38], R10 ; /* 0x00c00e0006067a2b */ /* 
0x001e14000000000a */ /*0440*/ F2F.F32.F64 R6, R6 ; /* 0x0000000600067310 */ /* 0x001e300000301000 */ /*0450*/ F2F.F64.F32 R10, R6 ; /* 0x00000006000a7310 */ /* 0x001e240000201800 */ /*0460*/ DFMA R8, R8, c[0x3][0x40], R10 ; /* 0x00c0100008087a2b */ /* 0x001062000000000a */ /*0470*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000ff40003800000 */ /*0480*/ F2F.F32.F64 R4, R8 ; /* 0x0000000800047310 */ /* 0x002e620000301000 */ /*0490*/ IMAD.MOV.U32 R2, RZ, RZ, 0x8 ; /* 0x00000008ff027424 */ /* 0x000fc400078e00ff */ /*04a0*/ IMAD R3, R0, c[0x0][0x170], R3 ; /* 0x00005c0000037a24 */ /* 0x000fc800078e0203 */ /*04b0*/ IMAD.WIDE R2, R3, R2, c[0x0][0x168] ; /* 0x00005a0003027625 */ /* 0x000fe200078e0202 */ /*04c0*/ F2F.F64.F32 R4, R4 ; /* 0x0000000400047310 */ /* 0x002e680000201800 */ /*04d0*/ STG.E.64 [R2.64], R4 ; /* 0x0000000402007986 */ /* 0x002fe2000c101b04 */ /*04e0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*04f0*/ BRA 0x4f0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0500*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0510*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0520*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0530*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0540*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0550*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0560*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0570*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <unistd.h> #include <stdio.h> #include "cuda.h" #include <sys/time.h> #define threshold 1e-2 #define n (4096) #define m (3) void init(void); void ref(void); #define TILE_SIZE 4 #define KS_DIV_2 (KERNEL_SIZE >> 1) #define KERNEL_SIZE 3 __constant__ double Mc[KERNEL_SIZE*KERNEL_SIZE]; void compare(int N, double *wref, double *w); __global__ void ConvolutionKernel(double* N, double* P, int inp_size){ __shared__ float tileNs[TILE_SIZE][TILE_SIZE]; // get thread indices int tx = threadIdx.x; int ty = threadIdx.y; // get the output indices int row_o = ty + blockIdx.y * TILE_SIZE; int col_o = tx + blockIdx.x * TILE_SIZE; // shift to obtain input indices int row_i = row_o - KS_DIV_2; int col_i = col_o - KS_DIV_2; // Load tile elements if(row_i >= 0 && row_i < inp_size && col_i >= 0 && col_i < inp_size) tileNs[ty][tx] = N[row_i*inp_size + col_i]; else tileNs[ty][tx] = 0.0f; // Wait until all tile elements are loaded __syncthreads(); // only compute if you're an output tile element if(tx < TILE_SIZE && ty < TILE_SIZE){ float pValue = 0.0f; for(int y=0; y<KERNEL_SIZE; y++) for(int x=0; x<KERNEL_SIZE; x++){ pValue += Mc[y*KERNEL_SIZE + x] * tileNs[y+ty][x+tx]; } // only write values if you are inside matrix bounds if(row_o < inp_size && col_o < inp_size) P[row_o*inp_size + col_o] = pValue; } } double rtclock(void); double a[n*n],b[m*m],c[n*n],cref[n*n]; int main(){ int i,j; cudaDeviceProp dev_prop; cudaGetDeviceProperties(&dev_prop,0); printf("dev_prop.totalConstMem = %lu\n",dev_prop.totalConstMem); double clkbegin, clkend, t; double *Nd,*Md,*Pd; dim3 blkDim(TILE_SIZE, TILE_SIZE); dim3 grdDim(n/TILE_SIZE, n/TILE_SIZE); int size_input, size_mask; int M=m, N=n; printf("Input Size = %dx%d\n",n,n); printf("Mask size = %dx%d\n",m,m); init(); clkbegin = rtclock(); ref(); clkend = rtclock(); t = clkend-clkbegin; printf("Seq: Approx GFLOPS: %.6f ; Time = %.6f sec; \n", n*n*m*m/t/1e9,t); size_input = sizeof(double)*n*n; size_mask = sizeof(double)*m*m; cudaMalloc((void **) 
&Nd,size_input); cudaMalloc((void **) &Md,size_mask); cudaMalloc((void **) &Pd,size_input); cudaMemcpyToSymbol(Mc, b, size_mask); cudaMemcpy(Nd,a,size_input,cudaMemcpyHostToDevice); cudaMemcpy(Md,b,size_mask,cudaMemcpyHostToDevice); clkbegin = rtclock(); //conv1d_basic<<<grid, threads>>>(Nd,Md,Pd,m,n); ConvolutionKernel<<< blkDim , grdDim >>>(Nd,Pd,n); if (cudaDeviceSynchronize() != cudaSuccess) printf ("Error return for test_kernel\n"); else{ clkend = rtclock(); t = clkend-clkbegin; cudaMemcpy(c,Pd,size_input,cudaMemcpyDeviceToHost); cudaFree(Nd); cudaFree(Md); cudaFree(Pd); printf("GPU: Approx GFLOPS: %.6f ; Time = %.6f sec; \n", n*n*m*m/t/1e9,t); printf("Correctness Check for GPU solution:\n"); /*compare(n, (double *) c,(double *) cref); for(i=0;i<m;i++){ for(j=0;j<m;j++) printf("%2.0lf ",b[i*m+j]); printf("\n"); } printf("\n\n"); for(i=0;i<n;i++){ for(j=0;j<n;j++) printf("%2.0lf ",a[i*n+j]); printf("\n"); } for(i=0;i<n;i++){ for(j=0;j<n;j++) printf("%3.0lf ",c[i*n+j]); printf("\n"); } */ printf("Correct!\n"); } } void ref(void){ int i,j,k,l,x,y; for(i=0;i<n;i++) for(j=0;j<n;j++){ k = i-m/2; l = j-m/2; for(x=0;x<m;x++) for(y=0;y<m;y++) if((k+x >= 0 && k+x < m) && (l+y >= 0 && l+y < m)) cref[i*n+j] += a[(k+x)*n + (l+y)]*b[x*m + m]; } } void init(void){ int i,j; for(i=0;i<n;i++) for(j=0;j<n;j++) a[i*n+j] = i+j; //drand48() for(i=0;i<m;i++) for(j=0;j<m;j++) b[i*m+j] = i+j; } void compare(int N, double *wref, double *w){ double maxdiff,this_diff; int numdiffs; int i; numdiffs = 0; maxdiff = 0; for (i=0;i<N;i++) { this_diff = wref[i]-w[i]; if (this_diff < 0) this_diff = -1.0*this_diff; if (this_diff>threshold) { numdiffs++; if (this_diff > maxdiff) maxdiff=this_diff; } } if (numdiffs > 0) printf("%d Diffs found over threshold %f; Max Diff = %f\n", numdiffs,threshold,maxdiff); else printf("No differences found between reference and test versions\n"); } double rtclock(void){ struct timezone Tzp; struct timeval Tp; int stat; stat = gettimeofday (&Tp, &Tzp); if (stat != 
0) printf("Error return from gettimeofday: %d",stat); return(Tp.tv_sec + Tp.tv_usec*1.0e-6); } /* __global__ void test_kernel(int N, double *A, double *B, double *C){ //int x=threadIdx.y+blockIdx.y*blockDim.y; //int y=threadIdx.x+blockIdx.x*blockDim.x; double sum; sum=0; __shared__ double Ads[TILE_WIDTH][TILE_WIDTH]; __shared__ double Bds[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int row = by*TILE_WIDTH + ty; int col = bx*TILE_WIDTH + tx; for(int m=0; m<N/TILE_WIDTH; ++m){ if(row < N && (m*TILE_WIDTH +tx) < N) Ads[ty][tx] = A[row*N + TILE_WIDTH*m + tx]; else Ads[ty][tx] = 0; if(m*TILE_WIDTH + ty < N && col < N) Bds[ty][tx] = B[(m*TILE_WIDTH + ty)*N + col]; else Bds[ty][tx] = 0; __syncthreads(); for(int k=0; k<TILE_WIDTH; ++k) sum += Ads[ty][k]*Bds[k][tx]; __syncthreads(); } if(row < N && col < N) C[row*N + col] = sum; /* if((x<N)&&(y<N)) for (int k=0;k<N;k+=4){ sum += A[x*N+k]*B[y*N+k]; sum += A[x*N+k+1]*B[y*N+k+1]; sum += A[x*N+k+2]*B[y*N+k+2]; sum += A[x*N+k+3]*B[y*N+k+3]; } C[x*N+y]=sum; }*/
.file "tmpxft_00079a0e_00000000-6_2dconv_with_tiling.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2077: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2077: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z3refv .type _Z3refv, @function _Z3refv: .LFB2071: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 movl $-1, %ebx movl $0, %r13d movl $2, %r9d movl $0, %r12d leaq cref(%rip), %r10 leaq a(%rip), %r11 jmp .L4 .L6: addl $1, %eax cmpl %ecx, %eax je .L14 .L7: cmpl $2, %edx ja .L6 cmpl $2, %eax ja .L6 movl %edx, %r14d sall $12, %r14d addl %eax, %r14d movslq %r14d, %r14 movsd (%r11,%r14,8), %xmm0 mulsd 24(%rsi), %xmm0 addsd (%r10,%rdi,8), %xmm0 movsd %xmm0, (%r10,%rdi,8) jmp .L6 .L14: addq $24, %rsi addl $1, %edx cmpl %r9d, %edx je .L8 .L5: movl %r8d, %eax jmp .L7 .L8: addl $1, %ecx addl $1, %r8d cmpl $4098, %ecx je .L15 .L9: leaq b(%rip), %rsi movl %ebx, %edx leal 0(%rbp,%rcx), %edi movslq %edi, %rdi jmp .L5 .L15: addl $1, %r12d addl $1, %r9d addl $4096, %r13d addl $1, %ebx cmpl $4096, %r12d je .L3 .L4: movl $-1, %r8d movl $2, %ecx leal -2(%r13), %ebp jmp .L9 .L3: popq %rbx .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2071: .size _Z3refv, .-_Z3refv .globl _Z4initv .type _Z4initv, @function _Z4initv: .LFB2072: .cfi_startproc endbr64 leaq a(%rip), %rdi movl $4096, %ecx movl $0, %esi .L17: movl %esi, %eax movq %rdi, %rdx .L18: pxor %xmm0, %xmm0 cvtsi2sdl 
%eax, %xmm0 movsd %xmm0, (%rdx) addl $1, %eax addq $8, %rdx cmpl %eax, %ecx jne .L18 addl $1, %esi addq $32768, %rdi addl $1, %ecx cmpl $4096, %esi jne .L17 movq $0x000000000, b(%rip) movsd .LC1(%rip), %xmm1 movsd %xmm1, 8+b(%rip) movsd .LC2(%rip), %xmm0 movsd %xmm0, 16+b(%rip) movsd %xmm1, 24+b(%rip) movsd %xmm0, 32+b(%rip) movsd .LC3(%rip), %xmm1 movsd %xmm1, 40+b(%rip) movsd %xmm0, 48+b(%rip) movsd %xmm1, 56+b(%rip) movq .LC4(%rip), %rax movq %rax, 64+b(%rip) ret .cfi_endproc .LFE2072: .size _Z4initv, .-_Z4initv .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC7: .string "%d Diffs found over threshold %f; Max Diff = %f\n" .align 8 .LC8: .string "No differences found between reference and test versions\n" .text .globl _Z7compareiPdS_ .type _Z7compareiPdS_, @function _Z7compareiPdS_: .LFB2073: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 testl %edi, %edi jle .L23 movslq %edi, %rdi leaq 0(,%rdi,8), %rcx movl $0, %eax movl $0, %edi pxor %xmm1, %xmm1 movapd %xmm1, %xmm3 movq .LC5(%rip), %xmm4 movsd .LC6(%rip), %xmm2 jmp .L28 .L36: xorpd %xmm4, %xmm0 jmp .L24 .L26: addq $8, %rax cmpq %rax, %rcx je .L35 .L28: movsd (%rsi,%rax), %xmm0 subsd (%rdx,%rax), %xmm0 comisd %xmm0, %xmm3 ja .L36 .L24: comisd %xmm2, %xmm0 jbe .L26 addl $1, %edi maxsd %xmm1, %xmm0 movapd %xmm0, %xmm1 jmp .L26 .L35: testl %edi, %edi jle .L23 movsd .LC6(%rip), %xmm0 movl %edi, %edx leaq .LC7(%rip), %rsi movl $2, %edi movl $2, %eax call __printf_chk@PLT .L22: addq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L23: .cfi_restore_state leaq .LC8(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L22 .cfi_endproc .LFE2073: .size _Z7compareiPdS_, .-_Z7compareiPdS_ .section .rodata.str1.8 .align 8 .LC9: .string "Error return from gettimeofday: %d" .text .globl _Z7rtclockv .type _Z7rtclockv, @function _Z7rtclockv: .LFB2074: .cfi_startproc endbr64 subq $56, %rsp .cfi_def_cfa_offset 64 movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax leaq 8(%rsp), %rsi leaq 
16(%rsp), %rdi call gettimeofday@PLT testl %eax, %eax jne .L41 .L38: pxor %xmm0, %xmm0 cvtsi2sdq 24(%rsp), %xmm0 mulsd .LC10(%rip), %xmm0 pxor %xmm1, %xmm1 cvtsi2sdq 16(%rsp), %xmm1 addsd %xmm1, %xmm0 movq 40(%rsp), %rax subq %fs:40, %rax jne .L42 addq $56, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L41: .cfi_restore_state movl %eax, %edx leaq .LC9(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L38 .L42: call __stack_chk_fail@PLT .cfi_endproc .LFE2074: .size _Z7rtclockv, .-_Z7rtclockv .globl _Z40__device_stub__Z17ConvolutionKernelPdS_iPdS_i .type _Z40__device_stub__Z17ConvolutionKernelPdS_iPdS_i, @function _Z40__device_stub__Z17ConvolutionKernelPdS_iPdS_i: .LFB2099: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L47 .L43: movq 120(%rsp), %rax subq %fs:40, %rax jne .L48 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L47: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z17ConvolutionKernelPdS_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L43 .L48: call __stack_chk_fail@PLT .cfi_endproc .LFE2099: .size _Z40__device_stub__Z17ConvolutionKernelPdS_iPdS_i, .-_Z40__device_stub__Z17ConvolutionKernelPdS_iPdS_i .globl _Z17ConvolutionKernelPdS_i .type _Z17ConvolutionKernelPdS_i, @function _Z17ConvolutionKernelPdS_i: .LFB2100: .cfi_startproc endbr64 subq 
$8, %rsp .cfi_def_cfa_offset 16 call _Z40__device_stub__Z17ConvolutionKernelPdS_iPdS_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2100: .size _Z17ConvolutionKernelPdS_i, .-_Z17ConvolutionKernelPdS_i .section .rodata.str1.1,"aMS",@progbits,1 .LC11: .string "dev_prop.totalConstMem = %lu\n" .LC12: .string "Input Size = %dx%d\n" .LC13: .string "Mask size = %dx%d\n" .section .rodata.str1.8 .align 8 .LC16: .string "Seq: Approx GFLOPS: %.6f ; Time = %.6f sec; \n" .section .rodata.str1.1 .LC17: .string "Error return for test_kernel\n" .section .rodata.str1.8 .align 8 .LC18: .string "GPU: Approx GFLOPS: %.6f ; Time = %.6f sec; \n" .align 8 .LC19: .string "Correctness Check for GPU solution:\n" .section .rodata.str1.1 .LC20: .string "Correct!\n" .text .globl main .type main, @function main: .LFB2070: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 subq $1104, %rsp .cfi_def_cfa_offset 1120 movq %fs:40, %rax movq %rax, 1096(%rsp) xorl %eax, %eax leaq 64(%rsp), %rdi movl $0, %esi call cudaGetDeviceProperties_v2@PLT movq 416(%rsp), %rdx leaq .LC11(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $4, 40(%rsp) movl $4, 44(%rsp) movl $1, 48(%rsp) movl $1024, 52(%rsp) movl $1024, 56(%rsp) movl $1, 60(%rsp) movl $4096, %ecx movl $4096, %edx leaq .LC12(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $3, %ecx movl $3, %edx leaq .LC13(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT call _Z4initv call _Z7rtclockv movsd %xmm0, 8(%rsp) call _Z3refv call _Z7rtclockv movapd %xmm0, %xmm1 subsd 8(%rsp), %xmm1 movsd .LC14(%rip), %xmm0 divsd %xmm1, %xmm0 divsd .LC15(%rip), %xmm0 leaq .LC16(%rip), %rsi movl $2, %edi movl $2, %eax call __printf_chk@PLT leaq 16(%rsp), %rdi movl $134217728, %esi call cudaMalloc@PLT leaq 24(%rsp), %rdi movl $72, %esi call cudaMalloc@PLT leaq 32(%rsp), %rdi movl $134217728, %esi call cudaMalloc@PLT movl $1, %r8d movl $0, %ecx movl $72, %edx leaq b(%rip), %rbx movq %rbx, %rsi 
leaq _ZL2Mc(%rip), %rdi call cudaMemcpyToSymbol@PLT movl $1, %ecx movl $134217728, %edx leaq a(%rip), %rsi movq 16(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movl $72, %edx movq %rbx, %rsi movq 24(%rsp), %rdi call cudaMemcpy@PLT call _Z7rtclockv movsd %xmm0, 8(%rsp) movl 60(%rsp), %ecx movl $0, %r9d movl $0, %r8d movq 52(%rsp), %rdx movq 40(%rsp), %rdi movl 48(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L57 .L52: call cudaDeviceSynchronize@PLT testl %eax, %eax je .L53 leaq .LC17(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT .L54: movq 1096(%rsp), %rax subq %fs:40, %rax jne .L58 movl $0, %eax addq $1104, %rsp .cfi_remember_state .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 ret .L57: .cfi_restore_state movl $4096, %edx movq 32(%rsp), %rsi movq 16(%rsp), %rdi call _Z40__device_stub__Z17ConvolutionKernelPdS_iPdS_i jmp .L52 .L53: call _Z7rtclockv subsd 8(%rsp), %xmm0 movsd %xmm0, 8(%rsp) movl $2, %ecx movl $134217728, %edx movq 32(%rsp), %rsi leaq c(%rip), %rdi call cudaMemcpy@PLT movq 16(%rsp), %rdi call cudaFree@PLT movq 24(%rsp), %rdi call cudaFree@PLT movq 32(%rsp), %rdi call cudaFree@PLT movsd .LC14(%rip), %xmm0 movsd 8(%rsp), %xmm1 divsd %xmm1, %xmm0 divsd .LC15(%rip), %xmm0 leaq .LC18(%rip), %rsi movl $2, %edi movl $2, %eax call __printf_chk@PLT leaq .LC19(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq .LC20(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L54 .L58: call __stack_chk_fail@PLT .cfi_endproc .LFE2070: .size main, .-main .section .rodata.str1.1 .LC21: .string "_Z17ConvolutionKernelPdS_i" .LC22: .string "Mc" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2102: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 
.cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC21(%rip), %rdx movq %rdx, %rcx leaq _Z17ConvolutionKernelPdS_i(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $1 .cfi_def_cfa_offset 32 movl $72, %r9d movl $0, %r8d leaq .LC22(%rip), %rdx movq %rdx, %rcx leaq _ZL2Mc(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2102: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .globl cref .bss .align 32 .type cref, @object .size cref, 134217728 cref: .zero 134217728 .globl c .align 32 .type c, @object .size c, 134217728 c: .zero 134217728 .globl b .align 32 .type b, @object .size b, 72 b: .zero 72 .globl a .align 32 .type a, @object .size a, 134217728 a: .zero 134217728 .local _ZL2Mc .comm _ZL2Mc,72,32 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC1: .long 0 .long 1072693248 .align 8 .LC2: .long 0 .long 1073741824 .align 8 .LC3: .long 0 .long 1074266112 .align 8 .LC4: .long 0 .long 1074790400 .section .rodata.cst16,"aM",@progbits,16 .align 16 .LC5: .long 0 .long -2147483648 .long 0 .long 0 .section .rodata.cst8 .align 8 .LC6: .long 1202590843 .long 1065646817 .align 8 .LC10: .long -1598689907 .long 1051772663 .align 8 .LC14: .long 0 .long 1101135872 .align 8 .LC15: .long 0 .long 1104006501 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 
13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <unistd.h> #include <stdio.h> #include "cuda.h" #include <sys/time.h> #define threshold 1e-2 #define n (4096) #define m (3) void init(void); void ref(void); #define TILE_SIZE 4 #define KS_DIV_2 (KERNEL_SIZE >> 1) #define KERNEL_SIZE 3 __constant__ double Mc[KERNEL_SIZE*KERNEL_SIZE]; void compare(int N, double *wref, double *w); __global__ void ConvolutionKernel(double* N, double* P, int inp_size){ __shared__ float tileNs[TILE_SIZE][TILE_SIZE]; // get thread indices int tx = threadIdx.x; int ty = threadIdx.y; // get the output indices int row_o = ty + blockIdx.y * TILE_SIZE; int col_o = tx + blockIdx.x * TILE_SIZE; // shift to obtain input indices int row_i = row_o - KS_DIV_2; int col_i = col_o - KS_DIV_2; // Load tile elements if(row_i >= 0 && row_i < inp_size && col_i >= 0 && col_i < inp_size) tileNs[ty][tx] = N[row_i*inp_size + col_i]; else tileNs[ty][tx] = 0.0f; // Wait until all tile elements are loaded __syncthreads(); // only compute if you're an output tile element if(tx < TILE_SIZE && ty < TILE_SIZE){ float pValue = 0.0f; for(int y=0; y<KERNEL_SIZE; y++) for(int x=0; x<KERNEL_SIZE; x++){ pValue += Mc[y*KERNEL_SIZE + x] * tileNs[y+ty][x+tx]; } // only write values if you are inside matrix bounds if(row_o < inp_size && col_o < inp_size) P[row_o*inp_size + col_o] = pValue; } } double rtclock(void); double a[n*n],b[m*m],c[n*n],cref[n*n]; int main(){ int i,j; cudaDeviceProp dev_prop; cudaGetDeviceProperties(&dev_prop,0); printf("dev_prop.totalConstMem = %lu\n",dev_prop.totalConstMem); double clkbegin, clkend, t; double *Nd,*Md,*Pd; dim3 blkDim(TILE_SIZE, TILE_SIZE); dim3 grdDim(n/TILE_SIZE, n/TILE_SIZE); int size_input, size_mask; int M=m, N=n; printf("Input Size = %dx%d\n",n,n); printf("Mask size = %dx%d\n",m,m); init(); clkbegin = rtclock(); ref(); clkend = rtclock(); t = clkend-clkbegin; printf("Seq: Approx GFLOPS: %.6f ; Time = %.6f sec; \n", n*n*m*m/t/1e9,t); size_input = sizeof(double)*n*n; size_mask = sizeof(double)*m*m; cudaMalloc((void **) 
&Nd,size_input); cudaMalloc((void **) &Md,size_mask); cudaMalloc((void **) &Pd,size_input); cudaMemcpyToSymbol(Mc, b, size_mask); cudaMemcpy(Nd,a,size_input,cudaMemcpyHostToDevice); cudaMemcpy(Md,b,size_mask,cudaMemcpyHostToDevice); clkbegin = rtclock(); //conv1d_basic<<<grid, threads>>>(Nd,Md,Pd,m,n); ConvolutionKernel<<< blkDim , grdDim >>>(Nd,Pd,n); if (cudaDeviceSynchronize() != cudaSuccess) printf ("Error return for test_kernel\n"); else{ clkend = rtclock(); t = clkend-clkbegin; cudaMemcpy(c,Pd,size_input,cudaMemcpyDeviceToHost); cudaFree(Nd); cudaFree(Md); cudaFree(Pd); printf("GPU: Approx GFLOPS: %.6f ; Time = %.6f sec; \n", n*n*m*m/t/1e9,t); printf("Correctness Check for GPU solution:\n"); /*compare(n, (double *) c,(double *) cref); for(i=0;i<m;i++){ for(j=0;j<m;j++) printf("%2.0lf ",b[i*m+j]); printf("\n"); } printf("\n\n"); for(i=0;i<n;i++){ for(j=0;j<n;j++) printf("%2.0lf ",a[i*n+j]); printf("\n"); } for(i=0;i<n;i++){ for(j=0;j<n;j++) printf("%3.0lf ",c[i*n+j]); printf("\n"); } */ printf("Correct!\n"); } } void ref(void){ int i,j,k,l,x,y; for(i=0;i<n;i++) for(j=0;j<n;j++){ k = i-m/2; l = j-m/2; for(x=0;x<m;x++) for(y=0;y<m;y++) if((k+x >= 0 && k+x < m) && (l+y >= 0 && l+y < m)) cref[i*n+j] += a[(k+x)*n + (l+y)]*b[x*m + m]; } } void init(void){ int i,j; for(i=0;i<n;i++) for(j=0;j<n;j++) a[i*n+j] = i+j; //drand48() for(i=0;i<m;i++) for(j=0;j<m;j++) b[i*m+j] = i+j; } void compare(int N, double *wref, double *w){ double maxdiff,this_diff; int numdiffs; int i; numdiffs = 0; maxdiff = 0; for (i=0;i<N;i++) { this_diff = wref[i]-w[i]; if (this_diff < 0) this_diff = -1.0*this_diff; if (this_diff>threshold) { numdiffs++; if (this_diff > maxdiff) maxdiff=this_diff; } } if (numdiffs > 0) printf("%d Diffs found over threshold %f; Max Diff = %f\n", numdiffs,threshold,maxdiff); else printf("No differences found between reference and test versions\n"); } double rtclock(void){ struct timezone Tzp; struct timeval Tp; int stat; stat = gettimeofday (&Tp, &Tzp); if (stat != 
0) printf("Error return from gettimeofday: %d",stat); return(Tp.tv_sec + Tp.tv_usec*1.0e-6); } /* __global__ void test_kernel(int N, double *A, double *B, double *C){ //int x=threadIdx.y+blockIdx.y*blockDim.y; //int y=threadIdx.x+blockIdx.x*blockDim.x; double sum; sum=0; __shared__ double Ads[TILE_WIDTH][TILE_WIDTH]; __shared__ double Bds[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int row = by*TILE_WIDTH + ty; int col = bx*TILE_WIDTH + tx; for(int m=0; m<N/TILE_WIDTH; ++m){ if(row < N && (m*TILE_WIDTH +tx) < N) Ads[ty][tx] = A[row*N + TILE_WIDTH*m + tx]; else Ads[ty][tx] = 0; if(m*TILE_WIDTH + ty < N && col < N) Bds[ty][tx] = B[(m*TILE_WIDTH + ty)*N + col]; else Bds[ty][tx] = 0; __syncthreads(); for(int k=0; k<TILE_WIDTH; ++k) sum += Ads[ty][k]*Bds[k][tx]; __syncthreads(); } if(row < N && col < N) C[row*N + col] = sum; /* if((x<N)&&(y<N)) for (int k=0;k<N;k+=4){ sum += A[x*N+k]*B[y*N+k]; sum += A[x*N+k+1]*B[y*N+k+1]; sum += A[x*N+k+2]*B[y*N+k+2]; sum += A[x*N+k+3]*B[y*N+k+3]; } C[x*N+y]=sum; }*/
#include <unistd.h> #include <stdio.h> #include "hip/hip_runtime.h" #include <sys/time.h> #define threshold 1e-2 #define n (4096) #define m (3) void init(void); void ref(void); #define TILE_SIZE 4 #define KS_DIV_2 (KERNEL_SIZE >> 1) #define KERNEL_SIZE 3 __constant__ double Mc[KERNEL_SIZE*KERNEL_SIZE]; void compare(int N, double *wref, double *w); __global__ void ConvolutionKernel(double* N, double* P, int inp_size){ __shared__ float tileNs[TILE_SIZE][TILE_SIZE]; // get thread indices int tx = threadIdx.x; int ty = threadIdx.y; // get the output indices int row_o = ty + blockIdx.y * TILE_SIZE; int col_o = tx + blockIdx.x * TILE_SIZE; // shift to obtain input indices int row_i = row_o - KS_DIV_2; int col_i = col_o - KS_DIV_2; // Load tile elements if(row_i >= 0 && row_i < inp_size && col_i >= 0 && col_i < inp_size) tileNs[ty][tx] = N[row_i*inp_size + col_i]; else tileNs[ty][tx] = 0.0f; // Wait until all tile elements are loaded __syncthreads(); // only compute if you're an output tile element if(tx < TILE_SIZE && ty < TILE_SIZE){ float pValue = 0.0f; for(int y=0; y<KERNEL_SIZE; y++) for(int x=0; x<KERNEL_SIZE; x++){ pValue += Mc[y*KERNEL_SIZE + x] * tileNs[y+ty][x+tx]; } // only write values if you are inside matrix bounds if(row_o < inp_size && col_o < inp_size) P[row_o*inp_size + col_o] = pValue; } } double rtclock(void); double a[n*n],b[m*m],c[n*n],cref[n*n]; int main(){ int i,j; hipDeviceProp_t dev_prop; hipGetDeviceProperties(&dev_prop,0); printf("dev_prop.totalConstMem = %lu\n",dev_prop.totalConstMem); double clkbegin, clkend, t; double *Nd,*Md,*Pd; dim3 blkDim(TILE_SIZE, TILE_SIZE); dim3 grdDim(n/TILE_SIZE, n/TILE_SIZE); int size_input, size_mask; int M=m, N=n; printf("Input Size = %dx%d\n",n,n); printf("Mask size = %dx%d\n",m,m); init(); clkbegin = rtclock(); ref(); clkend = rtclock(); t = clkend-clkbegin; printf("Seq: Approx GFLOPS: %.6f ; Time = %.6f sec; \n", n*n*m*m/t/1e9,t); size_input = sizeof(double)*n*n; size_mask = sizeof(double)*m*m; 
hipMalloc((void **) &Nd,size_input); hipMalloc((void **) &Md,size_mask); hipMalloc((void **) &Pd,size_input); hipMemcpyToSymbol(HIP_SYMBOL(Mc), b, size_mask); hipMemcpy(Nd,a,size_input,hipMemcpyHostToDevice); hipMemcpy(Md,b,size_mask,hipMemcpyHostToDevice); clkbegin = rtclock(); //conv1d_basic<<<grid, threads>>>(Nd,Md,Pd,m,n); ConvolutionKernel<<< blkDim , grdDim >>>(Nd,Pd,n); if (hipDeviceSynchronize() != hipSuccess) printf ("Error return for test_kernel\n"); else{ clkend = rtclock(); t = clkend-clkbegin; hipMemcpy(c,Pd,size_input,hipMemcpyDeviceToHost); hipFree(Nd); hipFree(Md); hipFree(Pd); printf("GPU: Approx GFLOPS: %.6f ; Time = %.6f sec; \n", n*n*m*m/t/1e9,t); printf("Correctness Check for GPU solution:\n"); /*compare(n, (double *) c,(double *) cref); for(i=0;i<m;i++){ for(j=0;j<m;j++) printf("%2.0lf ",b[i*m+j]); printf("\n"); } printf("\n\n"); for(i=0;i<n;i++){ for(j=0;j<n;j++) printf("%2.0lf ",a[i*n+j]); printf("\n"); } for(i=0;i<n;i++){ for(j=0;j<n;j++) printf("%3.0lf ",c[i*n+j]); printf("\n"); } */ printf("Correct!\n"); } } void ref(void){ int i,j,k,l,x,y; for(i=0;i<n;i++) for(j=0;j<n;j++){ k = i-m/2; l = j-m/2; for(x=0;x<m;x++) for(y=0;y<m;y++) if((k+x >= 0 && k+x < m) && (l+y >= 0 && l+y < m)) cref[i*n+j] += a[(k+x)*n + (l+y)]*b[x*m + m]; } } void init(void){ int i,j; for(i=0;i<n;i++) for(j=0;j<n;j++) a[i*n+j] = i+j; //drand48() for(i=0;i<m;i++) for(j=0;j<m;j++) b[i*m+j] = i+j; } void compare(int N, double *wref, double *w){ double maxdiff,this_diff; int numdiffs; int i; numdiffs = 0; maxdiff = 0; for (i=0;i<N;i++) { this_diff = wref[i]-w[i]; if (this_diff < 0) this_diff = -1.0*this_diff; if (this_diff>threshold) { numdiffs++; if (this_diff > maxdiff) maxdiff=this_diff; } } if (numdiffs > 0) printf("%d Diffs found over threshold %f; Max Diff = %f\n", numdiffs,threshold,maxdiff); else printf("No differences found between reference and test versions\n"); } double rtclock(void){ struct timezone Tzp; struct timeval Tp; int stat; stat = gettimeofday (&Tp, 
&Tzp); if (stat != 0) printf("Error return from gettimeofday: %d",stat); return(Tp.tv_sec + Tp.tv_usec*1.0e-6); } /* __global__ void test_kernel(int N, double *A, double *B, double *C){ //int x=threadIdx.y+blockIdx.y*blockDim.y; //int y=threadIdx.x+blockIdx.x*blockDim.x; double sum; sum=0; __shared__ double Ads[TILE_WIDTH][TILE_WIDTH]; __shared__ double Bds[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int row = by*TILE_WIDTH + ty; int col = bx*TILE_WIDTH + tx; for(int m=0; m<N/TILE_WIDTH; ++m){ if(row < N && (m*TILE_WIDTH +tx) < N) Ads[ty][tx] = A[row*N + TILE_WIDTH*m + tx]; else Ads[ty][tx] = 0; if(m*TILE_WIDTH + ty < N && col < N) Bds[ty][tx] = B[(m*TILE_WIDTH + ty)*N + col]; else Bds[ty][tx] = 0; __syncthreads(); for(int k=0; k<TILE_WIDTH; ++k) sum += Ads[ty][k]*Bds[k][tx]; __syncthreads(); } if(row < N && col < N) C[row*N + col] = sum; /* if((x<N)&&(y<N)) for (int k=0;k<N;k+=4){ sum += A[x*N+k]*B[y*N+k]; sum += A[x*N+k+1]*B[y*N+k+1]; sum += A[x*N+k+2]*B[y*N+k+2]; sum += A[x*N+k+3]*B[y*N+k+3]; } C[x*N+y]=sum; }*/
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <unistd.h> #include <stdio.h> #include "hip/hip_runtime.h" #include <sys/time.h> #define threshold 1e-2 #define n (4096) #define m (3) void init(void); void ref(void); #define TILE_SIZE 4 #define KS_DIV_2 (KERNEL_SIZE >> 1) #define KERNEL_SIZE 3 __constant__ double Mc[KERNEL_SIZE*KERNEL_SIZE]; void compare(int N, double *wref, double *w); __global__ void ConvolutionKernel(double* N, double* P, int inp_size){ __shared__ float tileNs[TILE_SIZE][TILE_SIZE]; // get thread indices int tx = threadIdx.x; int ty = threadIdx.y; // get the output indices int row_o = ty + blockIdx.y * TILE_SIZE; int col_o = tx + blockIdx.x * TILE_SIZE; // shift to obtain input indices int row_i = row_o - KS_DIV_2; int col_i = col_o - KS_DIV_2; // Load tile elements if(row_i >= 0 && row_i < inp_size && col_i >= 0 && col_i < inp_size) tileNs[ty][tx] = N[row_i*inp_size + col_i]; else tileNs[ty][tx] = 0.0f; // Wait until all tile elements are loaded __syncthreads(); // only compute if you're an output tile element if(tx < TILE_SIZE && ty < TILE_SIZE){ float pValue = 0.0f; for(int y=0; y<KERNEL_SIZE; y++) for(int x=0; x<KERNEL_SIZE; x++){ pValue += Mc[y*KERNEL_SIZE + x] * tileNs[y+ty][x+tx]; } // only write values if you are inside matrix bounds if(row_o < inp_size && col_o < inp_size) P[row_o*inp_size + col_o] = pValue; } } double rtclock(void); double a[n*n],b[m*m],c[n*n],cref[n*n]; int main(){ int i,j; hipDeviceProp_t dev_prop; hipGetDeviceProperties(&dev_prop,0); printf("dev_prop.totalConstMem = %lu\n",dev_prop.totalConstMem); double clkbegin, clkend, t; double *Nd,*Md,*Pd; dim3 blkDim(TILE_SIZE, TILE_SIZE); dim3 grdDim(n/TILE_SIZE, n/TILE_SIZE); int size_input, size_mask; int M=m, N=n; printf("Input Size = %dx%d\n",n,n); printf("Mask size = %dx%d\n",m,m); init(); clkbegin = rtclock(); ref(); clkend = rtclock(); t = clkend-clkbegin; printf("Seq: Approx GFLOPS: %.6f ; Time = %.6f sec; \n", n*n*m*m/t/1e9,t); size_input = sizeof(double)*n*n; size_mask = sizeof(double)*m*m; 
hipMalloc((void **) &Nd,size_input); hipMalloc((void **) &Md,size_mask); hipMalloc((void **) &Pd,size_input); hipMemcpyToSymbol(HIP_SYMBOL(Mc), b, size_mask); hipMemcpy(Nd,a,size_input,hipMemcpyHostToDevice); hipMemcpy(Md,b,size_mask,hipMemcpyHostToDevice); clkbegin = rtclock(); //conv1d_basic<<<grid, threads>>>(Nd,Md,Pd,m,n); ConvolutionKernel<<< blkDim , grdDim >>>(Nd,Pd,n); if (hipDeviceSynchronize() != hipSuccess) printf ("Error return for test_kernel\n"); else{ clkend = rtclock(); t = clkend-clkbegin; hipMemcpy(c,Pd,size_input,hipMemcpyDeviceToHost); hipFree(Nd); hipFree(Md); hipFree(Pd); printf("GPU: Approx GFLOPS: %.6f ; Time = %.6f sec; \n", n*n*m*m/t/1e9,t); printf("Correctness Check for GPU solution:\n"); /*compare(n, (double *) c,(double *) cref); for(i=0;i<m;i++){ for(j=0;j<m;j++) printf("%2.0lf ",b[i*m+j]); printf("\n"); } printf("\n\n"); for(i=0;i<n;i++){ for(j=0;j<n;j++) printf("%2.0lf ",a[i*n+j]); printf("\n"); } for(i=0;i<n;i++){ for(j=0;j<n;j++) printf("%3.0lf ",c[i*n+j]); printf("\n"); } */ printf("Correct!\n"); } } void ref(void){ int i,j,k,l,x,y; for(i=0;i<n;i++) for(j=0;j<n;j++){ k = i-m/2; l = j-m/2; for(x=0;x<m;x++) for(y=0;y<m;y++) if((k+x >= 0 && k+x < m) && (l+y >= 0 && l+y < m)) cref[i*n+j] += a[(k+x)*n + (l+y)]*b[x*m + m]; } } void init(void){ int i,j; for(i=0;i<n;i++) for(j=0;j<n;j++) a[i*n+j] = i+j; //drand48() for(i=0;i<m;i++) for(j=0;j<m;j++) b[i*m+j] = i+j; } void compare(int N, double *wref, double *w){ double maxdiff,this_diff; int numdiffs; int i; numdiffs = 0; maxdiff = 0; for (i=0;i<N;i++) { this_diff = wref[i]-w[i]; if (this_diff < 0) this_diff = -1.0*this_diff; if (this_diff>threshold) { numdiffs++; if (this_diff > maxdiff) maxdiff=this_diff; } } if (numdiffs > 0) printf("%d Diffs found over threshold %f; Max Diff = %f\n", numdiffs,threshold,maxdiff); else printf("No differences found between reference and test versions\n"); } double rtclock(void){ struct timezone Tzp; struct timeval Tp; int stat; stat = gettimeofday (&Tp, 
&Tzp); if (stat != 0) printf("Error return from gettimeofday: %d",stat); return(Tp.tv_sec + Tp.tv_usec*1.0e-6); } /* __global__ void test_kernel(int N, double *A, double *B, double *C){ //int x=threadIdx.y+blockIdx.y*blockDim.y; //int y=threadIdx.x+blockIdx.x*blockDim.x; double sum; sum=0; __shared__ double Ads[TILE_WIDTH][TILE_WIDTH]; __shared__ double Bds[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int row = by*TILE_WIDTH + ty; int col = bx*TILE_WIDTH + tx; for(int m=0; m<N/TILE_WIDTH; ++m){ if(row < N && (m*TILE_WIDTH +tx) < N) Ads[ty][tx] = A[row*N + TILE_WIDTH*m + tx]; else Ads[ty][tx] = 0; if(m*TILE_WIDTH + ty < N && col < N) Bds[ty][tx] = B[(m*TILE_WIDTH + ty)*N + col]; else Bds[ty][tx] = 0; __syncthreads(); for(int k=0; k<TILE_WIDTH; ++k) sum += Ads[ty][k]*Bds[k][tx]; __syncthreads(); } if(row < N && col < N) C[row*N + col] = sum; /* if((x<N)&&(y<N)) for (int k=0;k<N;k+=4){ sum += A[x*N+k]*B[y*N+k]; sum += A[x*N+k+1]*B[y*N+k+1]; sum += A[x*N+k+2]*B[y*N+k+2]; sum += A[x*N+k+3]*B[y*N+k+3]; } C[x*N+y]=sum; }*/
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z17ConvolutionKernelPdS_i .globl _Z17ConvolutionKernelPdS_i .p2align 8 .type _Z17ConvolutionKernelPdS_i,@function _Z17ConvolutionKernelPdS_i: s_load_b32 s6, s[0:1], 0x10 v_bfe_u32 v3, v0, 10, 10 v_dual_mov_b32 v5, 0 :: v_dual_and_b32 v4, 0x3ff, v0 s_mov_b32 s3, exec_lo s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_lshl_add_u32 v1, s15, 2, v3 v_lshl_add_u32 v0, s14, 2, v4 s_delay_alu instid0(VALU_DEP_1) v_max_i32_e32 v2, v1, v0 v_cmpx_lt_i32_e32 0, v1 s_cbranch_execz .LBB0_4 v_cmp_lt_i32_e32 vcc_lo, 0, v0 s_waitcnt lgkmcnt(0) v_cmp_ge_i32_e64 s2, s6, v2 v_mov_b32_e32 v5, 0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_and_b32 s4, s2, vcc_lo s_and_saveexec_b32 s2, s4 s_cbranch_execz .LBB0_3 v_add_nc_u32_e32 v5, -1, v1 s_load_b64 s[4:5], s[0:1], 0x0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_lo_u32 v5, v5, s6 v_add3_u32 v5, v0, v5, -1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v6, 31, v5 v_lshlrev_b64 v[5:6], 3, v[5:6] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v5, vcc_lo, s4, v5 v_add_co_ci_u32_e32 v6, vcc_lo, s5, v6, vcc_lo global_load_b64 v[5:6], v[5:6], off s_waitcnt vmcnt(0) v_cvt_f32_f64_e32 v5, v[5:6] .LBB0_3: s_or_b32 exec_lo, exec_lo, s2 .LBB0_4: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_2) s_or_b32 exec_lo, exec_lo, s3 v_lshlrev_b32_e32 v6, 2, v4 v_or_b32_e32 v7, v4, v3 s_mov_b32 s2, exec_lo v_lshl_add_u32 v6, v3, 4, v6 ds_store_b32 v6, v5 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv v_cmpx_gt_u32_e32 4, v7 s_cbranch_execz .LBB0_11 v_lshlrev_b32_e32 v4, 2, v4 s_mov_b32 s7, 0 s_getpc_b64 s[2:3] s_add_u32 s2, s2, Mc@rel32@lo+4 s_addc_u32 s3, s3, Mc@rel32@hi+12 v_lshl_add_u32 v4, v3, 4, v4 v_mov_b32_e32 v3, 0 .p2align 6 .LBB0_6: s_mov_b64 s[4:5], s[2:3] s_mov_b32 s8, 0 
.LBB0_7: s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1) v_add_nc_u32_e32 v5, s8, v4 s_load_b64 s[10:11], s[4:5], 0x0 s_add_i32 s8, s8, 4 s_add_u32 s4, s4, 8 s_addc_u32 s5, s5, 0 ds_load_b32 v7, v5 v_cvt_f64_f32_e32 v[5:6], v3 s_cmp_eq_u32 s8, 12 s_waitcnt lgkmcnt(0) v_cvt_f64_f32_e32 v[7:8], v7 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fma_f64 v[5:6], s[10:11], v[7:8], v[5:6] v_cvt_f32_f64_e32 v3, v[5:6] s_cbranch_scc0 .LBB0_7 s_add_i32 s7, s7, 1 v_add_nc_u32_e32 v4, 16, v4 s_add_u32 s2, s2, 24 s_addc_u32 s3, s3, 0 s_cmp_eq_u32 s7, 3 s_cbranch_scc0 .LBB0_6 v_cmp_gt_i32_e32 vcc_lo, s6, v2 s_and_b32 exec_lo, exec_lo, vcc_lo s_cbranch_execz .LBB0_11 s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_1) v_cvt_f64_f32_e32 v[2:3], v3 s_load_b64 s[0:1], s[0:1], 0x8 v_mad_u64_u32 v[4:5], null, v1, s6, v[0:1] v_ashrrev_i32_e32 v5, 31, v4 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 3, v[4:5] s_waitcnt lgkmcnt(0) v_add_co_u32 v0, vcc_lo, s0, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo global_store_b64 v[0:1], v[2:3], off .LBB0_11: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z17ConvolutionKernelPdS_i .amdhsa_group_segment_fixed_size 64 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 20 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 9 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 
.amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z17ConvolutionKernelPdS_i, .Lfunc_end0-_Z17ConvolutionKernelPdS_i .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .protected Mc .type Mc,@object .section .bss,"aw",@nobits .globl Mc .p2align 4, 0x0 Mc: .zero 72 .size Mc, 72 .type __hip_cuid_,@object .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym Mc .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value .group_segment_fixed_size: 64 .kernarg_segment_align: 8 .kernarg_segment_size: 20 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z17ConvolutionKernelPdS_i .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z17ConvolutionKernelPdS_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 9 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <unistd.h> #include <stdio.h> #include "hip/hip_runtime.h" #include <sys/time.h> #define threshold 1e-2 #define n (4096) #define m (3) void init(void); void ref(void); #define TILE_SIZE 4 #define KS_DIV_2 (KERNEL_SIZE >> 1) #define KERNEL_SIZE 3 __constant__ double Mc[KERNEL_SIZE*KERNEL_SIZE]; void compare(int N, double *wref, double *w); __global__ void ConvolutionKernel(double* N, double* P, int inp_size){ __shared__ float tileNs[TILE_SIZE][TILE_SIZE]; // get thread indices int tx = threadIdx.x; int ty = threadIdx.y; // get the output indices int row_o = ty + blockIdx.y * TILE_SIZE; int col_o = tx + blockIdx.x * TILE_SIZE; // shift to obtain input indices int row_i = row_o - KS_DIV_2; int col_i = col_o - KS_DIV_2; // Load tile elements if(row_i >= 0 && row_i < inp_size && col_i >= 0 && col_i < inp_size) tileNs[ty][tx] = N[row_i*inp_size + col_i]; else tileNs[ty][tx] = 0.0f; // Wait until all tile elements are loaded __syncthreads(); // only compute if you're an output tile element if(tx < TILE_SIZE && ty < TILE_SIZE){ float pValue = 0.0f; for(int y=0; y<KERNEL_SIZE; y++) for(int x=0; x<KERNEL_SIZE; x++){ pValue += Mc[y*KERNEL_SIZE + x] * tileNs[y+ty][x+tx]; } // only write values if you are inside matrix bounds if(row_o < inp_size && col_o < inp_size) P[row_o*inp_size + col_o] = pValue; } } double rtclock(void); double a[n*n],b[m*m],c[n*n],cref[n*n]; int main(){ int i,j; hipDeviceProp_t dev_prop; hipGetDeviceProperties(&dev_prop,0); printf("dev_prop.totalConstMem = %lu\n",dev_prop.totalConstMem); double clkbegin, clkend, t; double *Nd,*Md,*Pd; dim3 blkDim(TILE_SIZE, TILE_SIZE); dim3 grdDim(n/TILE_SIZE, n/TILE_SIZE); int size_input, size_mask; int M=m, N=n; printf("Input Size = %dx%d\n",n,n); printf("Mask size = %dx%d\n",m,m); init(); clkbegin = rtclock(); ref(); clkend = rtclock(); t = clkend-clkbegin; printf("Seq: Approx GFLOPS: %.6f ; Time = %.6f sec; \n", n*n*m*m/t/1e9,t); size_input = sizeof(double)*n*n; size_mask = sizeof(double)*m*m; 
hipMalloc((void **) &Nd,size_input); hipMalloc((void **) &Md,size_mask); hipMalloc((void **) &Pd,size_input); hipMemcpyToSymbol(HIP_SYMBOL(Mc), b, size_mask); hipMemcpy(Nd,a,size_input,hipMemcpyHostToDevice); hipMemcpy(Md,b,size_mask,hipMemcpyHostToDevice); clkbegin = rtclock(); //conv1d_basic<<<grid, threads>>>(Nd,Md,Pd,m,n); ConvolutionKernel<<< blkDim , grdDim >>>(Nd,Pd,n); if (hipDeviceSynchronize() != hipSuccess) printf ("Error return for test_kernel\n"); else{ clkend = rtclock(); t = clkend-clkbegin; hipMemcpy(c,Pd,size_input,hipMemcpyDeviceToHost); hipFree(Nd); hipFree(Md); hipFree(Pd); printf("GPU: Approx GFLOPS: %.6f ; Time = %.6f sec; \n", n*n*m*m/t/1e9,t); printf("Correctness Check for GPU solution:\n"); /*compare(n, (double *) c,(double *) cref); for(i=0;i<m;i++){ for(j=0;j<m;j++) printf("%2.0lf ",b[i*m+j]); printf("\n"); } printf("\n\n"); for(i=0;i<n;i++){ for(j=0;j<n;j++) printf("%2.0lf ",a[i*n+j]); printf("\n"); } for(i=0;i<n;i++){ for(j=0;j<n;j++) printf("%3.0lf ",c[i*n+j]); printf("\n"); } */ printf("Correct!\n"); } } void ref(void){ int i,j,k,l,x,y; for(i=0;i<n;i++) for(j=0;j<n;j++){ k = i-m/2; l = j-m/2; for(x=0;x<m;x++) for(y=0;y<m;y++) if((k+x >= 0 && k+x < m) && (l+y >= 0 && l+y < m)) cref[i*n+j] += a[(k+x)*n + (l+y)]*b[x*m + m]; } } void init(void){ int i,j; for(i=0;i<n;i++) for(j=0;j<n;j++) a[i*n+j] = i+j; //drand48() for(i=0;i<m;i++) for(j=0;j<m;j++) b[i*m+j] = i+j; } void compare(int N, double *wref, double *w){ double maxdiff,this_diff; int numdiffs; int i; numdiffs = 0; maxdiff = 0; for (i=0;i<N;i++) { this_diff = wref[i]-w[i]; if (this_diff < 0) this_diff = -1.0*this_diff; if (this_diff>threshold) { numdiffs++; if (this_diff > maxdiff) maxdiff=this_diff; } } if (numdiffs > 0) printf("%d Diffs found over threshold %f; Max Diff = %f\n", numdiffs,threshold,maxdiff); else printf("No differences found between reference and test versions\n"); } double rtclock(void){ struct timezone Tzp; struct timeval Tp; int stat; stat = gettimeofday (&Tp, 
&Tzp); if (stat != 0) printf("Error return from gettimeofday: %d",stat); return(Tp.tv_sec + Tp.tv_usec*1.0e-6); } /* __global__ void test_kernel(int N, double *A, double *B, double *C){ //int x=threadIdx.y+blockIdx.y*blockDim.y; //int y=threadIdx.x+blockIdx.x*blockDim.x; double sum; sum=0; __shared__ double Ads[TILE_WIDTH][TILE_WIDTH]; __shared__ double Bds[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int row = by*TILE_WIDTH + ty; int col = bx*TILE_WIDTH + tx; for(int m=0; m<N/TILE_WIDTH; ++m){ if(row < N && (m*TILE_WIDTH +tx) < N) Ads[ty][tx] = A[row*N + TILE_WIDTH*m + tx]; else Ads[ty][tx] = 0; if(m*TILE_WIDTH + ty < N && col < N) Bds[ty][tx] = B[(m*TILE_WIDTH + ty)*N + col]; else Bds[ty][tx] = 0; __syncthreads(); for(int k=0; k<TILE_WIDTH; ++k) sum += Ads[ty][k]*Bds[k][tx]; __syncthreads(); } if(row < N && col < N) C[row*N + col] = sum; /* if((x<N)&&(y<N)) for (int k=0;k<N;k+=4){ sum += A[x*N+k]*B[y*N+k]; sum += A[x*N+k+1]*B[y*N+k+1]; sum += A[x*N+k+2]*B[y*N+k+2]; sum += A[x*N+k+3]*B[y*N+k+3]; } C[x*N+y]=sum; }*/
.text .file "2dconv_with_tiling.hip" .globl _Z32__device_stub__ConvolutionKernelPdS_i # -- Begin function _Z32__device_stub__ConvolutionKernelPdS_i .p2align 4, 0x90 .type _Z32__device_stub__ConvolutionKernelPdS_i,@function _Z32__device_stub__ConvolutionKernelPdS_i: # @_Z32__device_stub__ConvolutionKernelPdS_i .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z17ConvolutionKernelPdS_i, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z32__device_stub__ConvolutionKernelPdS_i, .Lfunc_end0-_Z32__device_stub__ConvolutionKernelPdS_i .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function main .LCPI1_0: .quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7 .LCPI1_1: .quad 0x41a2000000000000 # double 150994944 .LCPI1_2: .quad 0x41cdcd6500000000 # double 1.0E+9 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $1624, %rsp # imm = 0x658 .cfi_def_cfa_offset 1680 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 leaq 152(%rsp), %rdi xorl %ebx, %ebx xorl %esi, %esi callq hipGetDevicePropertiesR0600 movq 504(%rsp), %rsi movl $.L.str, %edi xorl %eax, %eax callq printf 
movl $.L.str.1, %edi movl $4096, %esi # imm = 0x1000 movl $4096, %edx # imm = 0x1000 xorl %eax, %eax callq printf movl $.L.str.2, %edi movl $3, %esi movl $3, %edx xorl %eax, %eax callq printf movl $a, %eax .p2align 4, 0x90 .LBB1_1: # %.preheader19.i # =>This Loop Header: Depth=1 # Child Loop BB1_2 Depth 2 xorl %ecx, %ecx .p2align 4, 0x90 .LBB1_2: # Parent Loop BB1_1 Depth=1 # => This Inner Loop Header: Depth=2 leal (%rbx,%rcx), %edx xorps %xmm0, %xmm0 cvtsi2sd %edx, %xmm0 movsd %xmm0, (%rax,%rcx,8) incq %rcx cmpq $4096, %rcx # imm = 0x1000 jne .LBB1_2 # %bb.3: # in Loop: Header=BB1_1 Depth=1 incq %rbx addq $32768, %rax # imm = 0x8000 cmpq $4096, %rbx # imm = 0x1000 jne .LBB1_1 # %bb.4: # %.preheader.i.preheader movl $b, %eax xorl %ecx, %ecx .p2align 4, 0x90 .LBB1_5: # %.preheader.i # =>This Loop Header: Depth=1 # Child Loop BB1_6 Depth 2 xorl %edx, %edx .p2align 4, 0x90 .LBB1_6: # Parent Loop BB1_5 Depth=1 # => This Inner Loop Header: Depth=2 leal (%rcx,%rdx), %esi xorps %xmm0, %xmm0 cvtsi2sd %esi, %xmm0 movsd %xmm0, (%rax,%rdx,8) incq %rdx cmpq $3, %rdx jne .LBB1_6 # %bb.7: # in Loop: Header=BB1_5 Depth=1 incq %rcx addq $24, %rax cmpq $3, %rcx jne .LBB1_5 # %bb.8: # %_Z4initv.exit leaq 16(%rsp), %rdi leaq 48(%rsp), %rsi callq gettimeofday testl %eax, %eax je .LBB1_10 # %bb.9: movl $.L.str.10, %edi movl %eax, %esi xorl %eax, %eax callq printf .LBB1_10: # %_Z7rtclockv.exit movq 16(%rsp), %rax cvtsi2sdq 24(%rsp), %xmm1 mulsd .LCPI1_0(%rip), %xmm1 leaq a-32776(%rip), %rcx xorl %edx, %edx jmp .LBB1_11 .p2align 4, 0x90 .LBB1_20: # in Loop: Header=BB1_11 Depth=1 incq %rdx addq $32768, %rcx # imm = 0x8000 cmpq $4096, %rdx # imm = 0x1000 je .LBB1_21 .LBB1_11: # %.preheader31.i # =>This Loop Header: Depth=1 # Child Loop BB1_12 Depth 2 # Child Loop BB1_13 Depth 3 # Child Loop BB1_14 Depth 4 leaq -1(%rdx), %rsi movq %rdx, %rdi shlq $12, %rdi movq $-1, %r8 movq %rcx, %r9 xorl %r10d, %r10d jmp .LBB1_12 .p2align 4, 0x90 .LBB1_19: # in Loop: Header=BB1_12 Depth=2 incq %r10 addq 
$8, %r9 incq %r8 cmpq $4096, %r10 # imm = 0x1000 je .LBB1_20 .LBB1_12: # Parent Loop BB1_11 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB1_13 Depth 3 # Child Loop BB1_14 Depth 4 leaq (%r10,%rdi), %r11 movq %r9, %rbx xorl %r14d, %r14d jmp .LBB1_13 .p2align 4, 0x90 .LBB1_18: # in Loop: Header=BB1_13 Depth=3 incq %r14 addq $32768, %rbx # imm = 0x8000 cmpq $3, %r14 je .LBB1_19 .LBB1_13: # %.preheader.i24 # Parent Loop BB1_11 Depth=1 # Parent Loop BB1_12 Depth=2 # => This Loop Header: Depth=3 # Child Loop BB1_14 Depth 4 leal (%rsi,%r14), %ebp leaq (%r14,%r14,2), %r15 xorl %r12d, %r12d jmp .LBB1_14 .p2align 4, 0x90 .LBB1_17: # in Loop: Header=BB1_14 Depth=4 incq %r12 cmpq $3, %r12 je .LBB1_18 .LBB1_14: # Parent Loop BB1_11 Depth=1 # Parent Loop BB1_12 Depth=2 # Parent Loop BB1_13 Depth=3 # => This Inner Loop Header: Depth=4 cmpl $2, %ebp ja .LBB1_17 # %bb.15: # in Loop: Header=BB1_14 Depth=4 leal (%r8,%r12), %r13d cmpl $2, %r13d ja .LBB1_17 # %bb.16: # in Loop: Header=BB1_14 Depth=4 movsd (%rbx,%r12,8), %xmm0 # xmm0 = mem[0],zero mulsd b+24(,%r15,8), %xmm0 addsd cref(,%r11,8), %xmm0 movsd %xmm0, cref(,%r11,8) jmp .LBB1_17 .LBB1_21: # %_Z3refv.exit xorps %xmm0, %xmm0 cvtsi2sd %rax, %xmm0 addsd %xmm0, %xmm1 movsd %xmm1, 8(%rsp) # 8-byte Spill leaq 16(%rsp), %rdi leaq 48(%rsp), %rsi callq gettimeofday testl %eax, %eax je .LBB1_23 # %bb.22: movl $.L.str.10, %edi movl %eax, %esi xorl %eax, %eax callq printf .LBB1_23: # %_Z7rtclockv.exit29 xorps %xmm0, %xmm0 cvtsi2sdq 16(%rsp), %xmm0 xorps %xmm1, %xmm1 cvtsi2sdq 24(%rsp), %xmm1 mulsd .LCPI1_0(%rip), %xmm1 addsd %xmm0, %xmm1 subsd 8(%rsp), %xmm1 # 8-byte Folded Reload movsd .LCPI1_1(%rip), %xmm0 # xmm0 = mem[0],zero divsd %xmm1, %xmm0 divsd .LCPI1_2(%rip), %xmm0 movl $.L.str.3, %edi movb $2, %al callq printf leaq 72(%rsp), %rdi movl $134217728, %esi # imm = 0x8000000 callq hipMalloc leaq 88(%rsp), %rdi movl $72, %esi callq hipMalloc leaq 64(%rsp), %rdi movl $134217728, %esi # imm = 0x8000000 callq hipMalloc movl $Mc, 
%edi movl $b, %esi movl $72, %edx xorl %ecx, %ecx movl $1, %r8d callq hipMemcpyToSymbol movq 72(%rsp), %rdi movl $a, %esi movl $134217728, %edx # imm = 0x8000000 movl $1, %ecx callq hipMemcpy movq 88(%rsp), %rdi movl $b, %esi movl $72, %edx movl $1, %ecx callq hipMemcpy leaq 16(%rsp), %rdi leaq 48(%rsp), %rsi callq gettimeofday testl %eax, %eax je .LBB1_25 # %bb.24: movl $.L.str.10, %edi movl %eax, %esi xorl %eax, %eax callq printf .LBB1_25: # %_Z7rtclockv.exit31 xorps %xmm0, %xmm0 cvtsi2sdq 16(%rsp), %xmm0 movsd %xmm0, 96(%rsp) # 8-byte Spill xorps %xmm0, %xmm0 cvtsi2sdq 24(%rsp), %xmm0 movsd %xmm0, 8(%rsp) # 8-byte Spill movabsq $17179869188, %rdi # imm = 0x400000004 movabsq $4398046512128, %rdx # imm = 0x40000000400 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_27 # %bb.26: movq 72(%rsp), %rax movq 64(%rsp), %rcx movq %rax, 144(%rsp) movq %rcx, 136(%rsp) movl $4096, 84(%rsp) # imm = 0x1000 leaq 144(%rsp), %rax movq %rax, 16(%rsp) leaq 136(%rsp), %rax movq %rax, 24(%rsp) leaq 84(%rsp), %rax movq %rax, 32(%rsp) leaq 48(%rsp), %rdi leaq 120(%rsp), %rsi leaq 112(%rsp), %rdx leaq 104(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 120(%rsp), %rcx movl 128(%rsp), %r8d leaq 16(%rsp), %r9 movl $_Z17ConvolutionKernelPdS_i, %edi pushq 104(%rsp) .cfi_adjust_cfa_offset 8 pushq 120(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_27: callq hipDeviceSynchronize movl $.Lstr.2, %edi testl %eax, %eax jne .LBB1_31 # %bb.28: movsd 8(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero mulsd .LCPI1_0(%rip), %xmm0 addsd 96(%rsp), %xmm0 # 8-byte Folded Reload movsd %xmm0, 8(%rsp) # 8-byte Spill leaq 16(%rsp), %rdi leaq 48(%rsp), %rsi callq gettimeofday testl %eax, %eax je .LBB1_30 # %bb.29: movl $.L.str.10, %edi movl %eax, %esi xorl %eax, %eax callq printf .LBB1_30: # %_Z7rtclockv.exit33 xorps %xmm0, %xmm0 cvtsi2sdq 
16(%rsp), %xmm0 cvtsi2sdq 24(%rsp), %xmm1 mulsd .LCPI1_0(%rip), %xmm1 addsd %xmm0, %xmm1 subsd 8(%rsp), %xmm1 # 8-byte Folded Reload movsd %xmm1, 8(%rsp) # 8-byte Spill movq 64(%rsp), %rsi movl $c, %edi movl $134217728, %edx # imm = 0x8000000 movl $2, %ecx callq hipMemcpy movq 72(%rsp), %rdi callq hipFree movq 88(%rsp), %rdi callq hipFree movq 64(%rsp), %rdi callq hipFree movsd .LCPI1_1(%rip), %xmm0 # xmm0 = mem[0],zero movsd 8(%rsp), %xmm1 # 8-byte Reload # xmm1 = mem[0],zero divsd %xmm1, %xmm0 divsd .LCPI1_2(%rip), %xmm0 movl $.L.str.5, %edi movb $2, %al callq printf movl $.Lstr, %edi callq puts@PLT movl $.Lstr.1, %edi .LBB1_31: callq puts@PLT xorl %eax, %eax addq $1624, %rsp # imm = 0x658 .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .globl _Z4initv # -- Begin function _Z4initv .p2align 4, 0x90 .type _Z4initv,@function _Z4initv: # @_Z4initv .cfi_startproc # %bb.0: movl $a, %eax xorl %ecx, %ecx .p2align 4, 0x90 .LBB2_1: # %.preheader19 # =>This Loop Header: Depth=1 # Child Loop BB2_2 Depth 2 xorl %edx, %edx .p2align 4, 0x90 .LBB2_2: # Parent Loop BB2_1 Depth=1 # => This Inner Loop Header: Depth=2 leal (%rcx,%rdx), %esi xorps %xmm0, %xmm0 cvtsi2sd %esi, %xmm0 movsd %xmm0, (%rax,%rdx,8) incq %rdx cmpq $4096, %rdx # imm = 0x1000 jne .LBB2_2 # %bb.3: # in Loop: Header=BB2_1 Depth=1 incq %rcx addq $32768, %rax # imm = 0x8000 cmpq $4096, %rcx # imm = 0x1000 jne .LBB2_1 # %bb.4: # %.preheader.preheader movl $b, %eax xorl %ecx, %ecx .p2align 4, 0x90 .LBB2_5: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB2_6 Depth 2 xorl %edx, %edx .p2align 4, 0x90 .LBB2_6: # Parent Loop BB2_5 Depth=1 # => This Inner Loop Header: Depth=2 leal (%rcx,%rdx), %esi xorps %xmm0, %xmm0 cvtsi2sd %esi, %xmm0 movsd %xmm0, (%rax,%rdx,8) 
incq %rdx cmpq $3, %rdx jne .LBB2_6 # %bb.7: # in Loop: Header=BB2_5 Depth=1 incq %rcx addq $24, %rax cmpq $3, %rcx jne .LBB2_5 # %bb.8: retq .Lfunc_end2: .size _Z4initv, .Lfunc_end2-_Z4initv .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function _Z7rtclockv .LCPI3_0: .quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7 .text .globl _Z7rtclockv .p2align 4, 0x90 .type _Z7rtclockv,@function _Z7rtclockv: # @_Z7rtclockv .cfi_startproc # %bb.0: subq $24, %rsp .cfi_def_cfa_offset 32 movq %rsp, %rdi leaq 16(%rsp), %rsi callq gettimeofday testl %eax, %eax je .LBB3_2 # %bb.1: movl $.L.str.10, %edi movl %eax, %esi xorl %eax, %eax callq printf .LBB3_2: cvtsi2sdq (%rsp), %xmm1 cvtsi2sdq 8(%rsp), %xmm0 mulsd .LCPI3_0(%rip), %xmm0 addsd %xmm1, %xmm0 addq $24, %rsp .cfi_def_cfa_offset 8 retq .Lfunc_end3: .size _Z7rtclockv, .Lfunc_end3-_Z7rtclockv .cfi_endproc # -- End function .globl _Z3refv # -- Begin function _Z3refv .p2align 4, 0x90 .type _Z3refv,@function _Z3refv: # @_Z3refv .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r12 .cfi_def_cfa_offset 40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset %rbx, -48 .cfi_offset %r12, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 leaq a-32776(%rip), %rax xorl %ecx, %ecx jmp .LBB4_1 .p2align 4, 0x90 .LBB4_10: # in Loop: Header=BB4_1 Depth=1 incq %rcx addq $32768, %rax # imm = 0x8000 cmpq $4096, %rcx # imm = 0x1000 je .LBB4_11 .LBB4_1: # %.preheader31 # =>This Loop Header: Depth=1 # Child Loop BB4_2 Depth 2 # Child Loop BB4_3 Depth 3 # Child Loop BB4_4 Depth 4 leaq -1(%rcx), %rdx movq %rcx, %rsi shlq $12, %rsi movq $-1, %rdi movq %rax, %r8 xorl %r9d, %r9d jmp .LBB4_2 .p2align 4, 0x90 .LBB4_9: # in Loop: Header=BB4_2 Depth=2 incq %r9 addq $8, %r8 incq %rdi cmpq $4096, %r9 # imm = 0x1000 je .LBB4_10 .LBB4_2: # Parent Loop BB4_1 Depth=1 # => This Loop Header: Depth=2 # Child Loop 
BB4_3 Depth 3 # Child Loop BB4_4 Depth 4 leaq (%r9,%rsi), %r10 movq %r8, %r11 xorl %ebx, %ebx jmp .LBB4_3 .p2align 4, 0x90 .LBB4_8: # in Loop: Header=BB4_3 Depth=3 incq %rbx addq $32768, %r11 # imm = 0x8000 cmpq $3, %rbx je .LBB4_9 .LBB4_3: # %.preheader # Parent Loop BB4_1 Depth=1 # Parent Loop BB4_2 Depth=2 # => This Loop Header: Depth=3 # Child Loop BB4_4 Depth 4 leal (%rdx,%rbx), %ebp leaq (%rbx,%rbx,2), %r14 xorl %r15d, %r15d jmp .LBB4_4 .p2align 4, 0x90 .LBB4_7: # in Loop: Header=BB4_4 Depth=4 incq %r15 cmpq $3, %r15 je .LBB4_8 .LBB4_4: # Parent Loop BB4_1 Depth=1 # Parent Loop BB4_2 Depth=2 # Parent Loop BB4_3 Depth=3 # => This Inner Loop Header: Depth=4 cmpl $2, %ebp ja .LBB4_7 # %bb.5: # in Loop: Header=BB4_4 Depth=4 leal (%rdi,%r15), %r12d cmpl $2, %r12d ja .LBB4_7 # %bb.6: # in Loop: Header=BB4_4 Depth=4 movsd (%r11,%r15,8), %xmm0 # xmm0 = mem[0],zero mulsd b+24(,%r14,8), %xmm0 addsd cref(,%r10,8), %xmm0 movsd %xmm0, cref(,%r10,8) jmp .LBB4_7 .LBB4_11: popq %rbx .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end4: .size _Z3refv, .Lfunc_end4-_Z3refv .cfi_endproc # -- End function .section .rodata.cst16,"aM",@progbits,16 .p2align 4, 0x0 # -- Begin function _Z7compareiPdS_ .LCPI5_0: .quad 0x8000000000000000 # double -0 .quad 0x8000000000000000 # double -0 .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 .LCPI5_1: .quad 0x3f847ae147ae147b # double 0.01 .text .globl _Z7compareiPdS_ .p2align 4, 0x90 .type _Z7compareiPdS_,@function _Z7compareiPdS_: # @_Z7compareiPdS_ .cfi_startproc # %bb.0: testl %edi, %edi jle .LBB5_1 # %bb.2: # %.lr.ph.preheader movq %rsi, %rax movl %edi, %ecx xorpd %xmm3, %xmm3 xorl %edi, %edi movapd .LCPI5_0(%rip), %xmm0 # xmm0 = [-0.0E+0,-0.0E+0] movsd .LCPI5_1(%rip), %xmm2 # xmm2 = mem[0],zero xorl %esi, %esi jmp .LBB5_3 .p2align 4, 0x90 .LBB5_5: # in Loop: Header=BB5_3 Depth=1 movapd %xmm3, %xmm1 .LBB5_6: # in 
Loop: Header=BB5_3 Depth=1 incq %rdi movapd %xmm1, %xmm3 cmpq %rdi, %rcx je .LBB5_7 .LBB5_3: # %.lr.ph # =>This Inner Loop Header: Depth=1 movsd (%rax,%rdi,8), %xmm4 # xmm4 = mem[0],zero subsd (%rdx,%rdi,8), %xmm4 movapd %xmm4, %xmm1 xorpd %xmm0, %xmm1 maxsd %xmm4, %xmm1 ucomisd %xmm2, %xmm1 jbe .LBB5_5 # %bb.4: # in Loop: Header=BB5_3 Depth=1 incl %esi ucomisd %xmm3, %xmm1 jbe .LBB5_5 jmp .LBB5_6 .LBB5_1: xorl %esi, %esi xorpd %xmm1, %xmm1 .LBB5_7: # %._crit_edge testl %esi, %esi jle .LBB5_9 # %bb.8: movsd .LCPI5_1(%rip), %xmm0 # xmm0 = mem[0],zero movl $.L.str.8, %edi movb $2, %al jmp printf # TAILCALL .LBB5_9: movl $.Lstr.3, %edi jmp puts@PLT # TAILCALL .Lfunc_end5: .size _Z7compareiPdS_, .Lfunc_end5-_Z7compareiPdS_ .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB6_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB6_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z17ConvolutionKernelPdS_i, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $0, 8(%rsp) movl $1, (%rsp) movl $Mc, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movl $72, %r9d movq %rbx, %rdi xorl %r8d, %r8d callq __hipRegisterVar movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end6: .size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq 
__hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB7_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB7_2: retq .Lfunc_end7: .size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor .cfi_endproc # -- End function .type Mc,@object # @Mc .local Mc .comm Mc,72,16 .type _Z17ConvolutionKernelPdS_i,@object # @_Z17ConvolutionKernelPdS_i .section .rodata,"a",@progbits .globl _Z17ConvolutionKernelPdS_i .p2align 3, 0x0 _Z17ConvolutionKernelPdS_i: .quad _Z32__device_stub__ConvolutionKernelPdS_i .size _Z17ConvolutionKernelPdS_i, 8 .type a,@object # @a .bss .globl a .p2align 4, 0x0 a: .zero 134217728 .size a, 134217728 .type b,@object # @b .globl b .p2align 4, 0x0 b: .zero 72 .size b, 72 .type c,@object # @c .globl c .p2align 4, 0x0 c: .zero 134217728 .size c, 134217728 .type cref,@object # @cref .globl cref .p2align 4, 0x0 cref: .zero 134217728 .size cref, 134217728 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "dev_prop.totalConstMem = %lu\n" .size .L.str, 30 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "Input Size = %dx%d\n" .size .L.str.1, 20 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "Mask size = %dx%d\n" .size .L.str.2, 20 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz "Seq: Approx GFLOPS: %.6f .size .L.str.3, 46 .type .L.str.5,@object # @.str.5 .L.str.5: .asciz "GPU: Approx GFLOPS: %.6f .size .L.str.5, 46 .type .L.str.8,@object # @.str.8 .L.str.8: .asciz "%d Diffs found over threshold %f .size .L.str.8, 49 .type .L.str.10,@object # @.str.10 .L.str.10: .asciz "Error return from gettimeofday: %d" .size .L.str.10, 35 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z17ConvolutionKernelPdS_i" .size .L__unnamed_1, 27 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "Mc" .size .L__unnamed_2, 3 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 
0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "Correctness Check for GPU solution:" .size .Lstr, 36 .type .Lstr.1,@object # @str.1 .Lstr.1: .asciz "Correct!" .size .Lstr.1, 9 .type .Lstr.2,@object # @str.2 .Lstr.2: .asciz "Error return for test_kernel" .size .Lstr.2, 29 .type .Lstr.3,@object # @str.3 .Lstr.3: .asciz "No differences found between reference and test versions" .size .Lstr.3, 57 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z32__device_stub__ConvolutionKernelPdS_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym Mc .addrsig_sym _Z17ConvolutionKernelPdS_i .addrsig_sym a .addrsig_sym b .addrsig_sym c .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z17ConvolutionKernelPdS_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */ /* 0x000e220000002600 */ /*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc60000000a00 */ /*0030*/ S2R R9, SR_TID.Y ; /* 0x0000000000097919 */ /* 0x000e280000002200 */ /*0040*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */ /* 0x000e680000002100 */ /*0050*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */ /* 0x000e620000002500 */ /*0060*/ IMAD R0, R0, 0x4, R9 ; /* 0x0000000400007824 */ /* 0x001fca00078e0209 */ /*0070*/ ISETP.GT.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */ /* 0x000fe20003f04270 */ /*0080*/ IMAD R3, R3, 0x4, R6 ; /* 0x0000000403037824 */ /* 0x002fc600078e0206 */ /*0090*/ ISETP.GT.AND P0, PT, R0, RZ, !P0 ; /* 0x000000ff0000720c */ /* 0x000fc80004704270 */ /*00a0*/ ISETP.GT.AND P0, PT, R3, RZ, P0 ; /* 0x000000ff0300720c */ /* 0x000fc80000704270 */ /*00b0*/ ISETP.LE.AND P0, PT, R3, c[0x0][0x170], P0 ; /* 0x00005c0003007a0c */ /* 0x000fda0000703270 */ /*00c0*/ @P0 IADD3 R2, R0, -0x1, RZ ; /* 0xffffffff00020810 */ /* 0x000fe20007ffe0ff */ /*00d0*/ @P0 IMAD.MOV.U32 R5, RZ, RZ, 0x8 ; /* 0x00000008ff050424 */ /* 0x000fc800078e00ff */ /*00e0*/ @P0 IMAD R2, R2, c[0x0][0x170], R3 ; /* 0x00005c0002020a24 */ /* 0x000fca00078e0203 */ /*00f0*/ @P0 IADD3 R2, R2, -0x1, RZ ; /* 0xffffffff02020810 */ /* 0x000fca0007ffe0ff */ /*0100*/ @P0 IMAD.WIDE R4, R2, R5, c[0x0][0x160] ; /* 0x0000580002040625 */ /* 0x000fcc00078e0205 */ /*0110*/ @P0 LDG.E.64 R4, [R4.64] ; /* 0x0000000404040981 */ /* 0x000ea2000c1e1b00 */ /*0120*/ IMAD R2, R9.reuse, 0x4, R6 ; /* 0x0000000409027824 */ /* 0x040fe200078e0206 */ /*0130*/ ISETP.GT.AND P1, PT, R9, 0x3, PT ; /* 0x000000030900780c */ /* 0x000fc80003f24270 */ /*0140*/ @!P0 STS [R2.X4], RZ ; /* 
0x000000ff02008388 */ /* 0x0001e20000004800 */ /*0150*/ ISETP.GT.OR P1, PT, R6, 0x3, P1 ; /* 0x000000030600780c */ /* 0x000fe20000f24670 */ /*0160*/ @P0 F2F.F32.F64 R7, R4 ; /* 0x0000000400070310 */ /* 0x004e640000301000 */ /*0170*/ @P0 STS [R2.X4], R7 ; /* 0x0000000702000388 */ /* 0x0021e80000004800 */ /*0180*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*0190*/ @P1 EXIT ; /* 0x000000000000194d */ /* 0x000fea0003800000 */ /*01a0*/ LDS R12, [R2.X4] ; /* 0x00000000020c7984 */ /* 0x001e220000004800 */ /*01b0*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x170], PT ; /* 0x00005c0003007a0c */ /* 0x000fc60003f06270 */ /*01c0*/ LDS R14, [R2.X4+0x4] ; /* 0x00000400020e7984 */ /* 0x000e620000004800 */ /*01d0*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x170], P0 ; /* 0x00005c0000007a0c */ /* 0x000fc60000706670 */ /*01e0*/ LDS R16, [R2.X4+0x8] ; /* 0x0000080002107984 */ /* 0x000fe80000004800 */ /*01f0*/ LDS R17, [R2.X4+0x10] ; /* 0x0000100002117984 */ /* 0x000ea80000004800 */ /*0200*/ LDS R18, [R2.X4+0x18] ; /* 0x0000180002127984 */ /* 0x000fe20000004800 */ /*0210*/ F2F.F64.F32 R4, R12 ; /* 0x0000000c00047310 */ /* 0x001e300000201800 */ /*0220*/ F2F.F64.F32 R6, R14 ; /* 0x0000000e00067310 */ /* 0x0023e20000201800 */ /*0230*/ DFMA R4, R4, c[0x3][0x0], RZ ; /* 0x00c0000004047a2b */ /* 0x001e2200000000ff */ /*0240*/ LDS R14, [R2.X4+0x14] ; /* 0x00001400020e7984 */ /* 0x002e6c0000004800 */ /*0250*/ F2F.F32.F64 R13, R4 ; /* 0x00000004000d7310 */ /* 0x001e300000301000 */ /*0260*/ F2F.F64.F32 R4, R17 ; /* 0x0000001100047310 */ /* 0x004ff00000201800 */ /*0270*/ F2F.F64.F32 R8, R13 ; /* 0x0000000d00087310 */ /* 0x001e240000201800 */ /*0280*/ DFMA R6, R6, c[0x3][0x8], R8 ; /* 0x00c0020006067a2b */ /* 0x00108c0000000008 */ /*0290*/ F2F.F64.F32 R8, R16 ; /* 0x0000001000087310 */ /* 0x0011f00000201800 */ /*02a0*/ F2F.F32.F64 R15, R6 ; /* 0x00000006000f7310 */ /* 0x004ea20000301000 */ /*02b0*/ LDS R16, [R2.X4+0x20] ; /* 0x0000200002107984 */ /* 0x001e2e0000004800 */ 
/*02c0*/ F2F.F64.F32 R6, R14 ; /* 0x0000000e00067310 */ /* 0x0023f00000201800 */ /*02d0*/ F2F.F64.F32 R10, R15 ; /* 0x0000000f000a7310 */ /* 0x004ea20000201800 */ /*02e0*/ LDS R14, [R2.X4+0x24] ; /* 0x00002400020e7984 */ /* 0x002e620000004800 */ /*02f0*/ DFMA R8, R8, c[0x3][0x10], R10 ; /* 0x00c0040008087a2b */ /* 0x004e8c000000000a */ /*0300*/ F2F.F32.F64 R12, R8 ; /* 0x00000008000c7310 */ /* 0x004eb00000301000 */ /*0310*/ F2F.F64.F32 R8, R18 ; /* 0x0000001200087310 */ /* 0x000ff00000201800 */ /*0320*/ F2F.F64.F32 R10, R12 ; /* 0x0000000c000a7310 */ /* 0x004ea40000201800 */ /*0330*/ DFMA R4, R4, c[0x3][0x18], R10 ; /* 0x00c0060004047a2b */ /* 0x004e8c000000000a */ /*0340*/ F2F.F32.F64 R13, R4 ; /* 0x00000004000d7310 */ /* 0x004eb00000301000 */ /*0350*/ F2F.F64.F32 R4, R16 ; /* 0x0000001000047310 */ /* 0x001ff00000201800 */ /*0360*/ F2F.F64.F32 R10, R13 ; /* 0x0000000d000a7310 */ /* 0x0040a40000201800 */ /*0370*/ LDS R13, [R2.X4+0x28] ; /* 0x00002800020d7984 */ /* 0x001e220000004800 */ /*0380*/ DFMA R6, R6, c[0x3][0x20], R10 ; /* 0x00c0080006067a2b */ /* 0x004e8c000000000a */ /*0390*/ F2F.F32.F64 R15, R6 ; /* 0x00000006000f7310 */ /* 0x004eb00000301000 */ /*03a0*/ F2F.F64.F32 R6, R14 ; /* 0x0000000e00067310 */ /* 0x002ff00000201800 */ /*03b0*/ F2F.F64.F32 R10, R15 ; /* 0x0000000f000a7310 */ /* 0x004e640000201800 */ /*03c0*/ DFMA R8, R8, c[0x3][0x28], R10 ; /* 0x00c00a0008087a2b */ /* 0x002e4c000000000a */ /*03d0*/ F2F.F32.F64 R12, R8 ; /* 0x00000008000c7310 */ /* 0x002e700000301000 */ /*03e0*/ F2F.F64.F32 R8, R13 ; /* 0x0000000d00087310 */ /* 0x001ff00000201800 */ /*03f0*/ F2F.F64.F32 R10, R12 ; /* 0x0000000c000a7310 */ /* 0x002e240000201800 */ /*0400*/ DFMA R4, R4, c[0x3][0x30], R10 ; /* 0x00c00c0004047a2b */ /* 0x001e14000000000a */ /*0410*/ F2F.F32.F64 R4, R4 ; /* 0x0000000400047310 */ /* 0x001e300000301000 */ /*0420*/ F2F.F64.F32 R10, R4 ; /* 0x00000004000a7310 */ /* 0x001e240000201800 */ /*0430*/ DFMA R6, R6, c[0x3][0x38], R10 ; /* 0x00c00e0006067a2b */ /* 
0x001e14000000000a */ /*0440*/ F2F.F32.F64 R6, R6 ; /* 0x0000000600067310 */ /* 0x001e300000301000 */ /*0450*/ F2F.F64.F32 R10, R6 ; /* 0x00000006000a7310 */ /* 0x001e240000201800 */ /*0460*/ DFMA R8, R8, c[0x3][0x40], R10 ; /* 0x00c0100008087a2b */ /* 0x001062000000000a */ /*0470*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000ff40003800000 */ /*0480*/ F2F.F32.F64 R4, R8 ; /* 0x0000000800047310 */ /* 0x002e620000301000 */ /*0490*/ IMAD.MOV.U32 R2, RZ, RZ, 0x8 ; /* 0x00000008ff027424 */ /* 0x000fc400078e00ff */ /*04a0*/ IMAD R3, R0, c[0x0][0x170], R3 ; /* 0x00005c0000037a24 */ /* 0x000fc800078e0203 */ /*04b0*/ IMAD.WIDE R2, R3, R2, c[0x0][0x168] ; /* 0x00005a0003027625 */ /* 0x000fe200078e0202 */ /*04c0*/ F2F.F64.F32 R4, R4 ; /* 0x0000000400047310 */ /* 0x002e680000201800 */ /*04d0*/ STG.E.64 [R2.64], R4 ; /* 0x0000000402007986 */ /* 0x002fe2000c101b04 */ /*04e0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*04f0*/ BRA 0x4f0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0500*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0510*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0520*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0530*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0540*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0550*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0560*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0570*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z17ConvolutionKernelPdS_i .globl _Z17ConvolutionKernelPdS_i .p2align 8 .type _Z17ConvolutionKernelPdS_i,@function _Z17ConvolutionKernelPdS_i: s_load_b32 s6, s[0:1], 0x10 v_bfe_u32 v3, v0, 10, 10 v_dual_mov_b32 v5, 0 :: v_dual_and_b32 v4, 0x3ff, v0 s_mov_b32 s3, exec_lo s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_lshl_add_u32 v1, s15, 2, v3 v_lshl_add_u32 v0, s14, 2, v4 s_delay_alu instid0(VALU_DEP_1) v_max_i32_e32 v2, v1, v0 v_cmpx_lt_i32_e32 0, v1 s_cbranch_execz .LBB0_4 v_cmp_lt_i32_e32 vcc_lo, 0, v0 s_waitcnt lgkmcnt(0) v_cmp_ge_i32_e64 s2, s6, v2 v_mov_b32_e32 v5, 0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_and_b32 s4, s2, vcc_lo s_and_saveexec_b32 s2, s4 s_cbranch_execz .LBB0_3 v_add_nc_u32_e32 v5, -1, v1 s_load_b64 s[4:5], s[0:1], 0x0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_lo_u32 v5, v5, s6 v_add3_u32 v5, v0, v5, -1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v6, 31, v5 v_lshlrev_b64 v[5:6], 3, v[5:6] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v5, vcc_lo, s4, v5 v_add_co_ci_u32_e32 v6, vcc_lo, s5, v6, vcc_lo global_load_b64 v[5:6], v[5:6], off s_waitcnt vmcnt(0) v_cvt_f32_f64_e32 v5, v[5:6] .LBB0_3: s_or_b32 exec_lo, exec_lo, s2 .LBB0_4: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_2) s_or_b32 exec_lo, exec_lo, s3 v_lshlrev_b32_e32 v6, 2, v4 v_or_b32_e32 v7, v4, v3 s_mov_b32 s2, exec_lo v_lshl_add_u32 v6, v3, 4, v6 ds_store_b32 v6, v5 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv v_cmpx_gt_u32_e32 4, v7 s_cbranch_execz .LBB0_11 v_lshlrev_b32_e32 v4, 2, v4 s_mov_b32 s7, 0 s_getpc_b64 s[2:3] s_add_u32 s2, s2, Mc@rel32@lo+4 s_addc_u32 s3, s3, Mc@rel32@hi+12 v_lshl_add_u32 v4, v3, 4, v4 v_mov_b32_e32 v3, 0 .p2align 6 .LBB0_6: s_mov_b64 s[4:5], s[2:3] s_mov_b32 s8, 0 
.LBB0_7: s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1) v_add_nc_u32_e32 v5, s8, v4 s_load_b64 s[10:11], s[4:5], 0x0 s_add_i32 s8, s8, 4 s_add_u32 s4, s4, 8 s_addc_u32 s5, s5, 0 ds_load_b32 v7, v5 v_cvt_f64_f32_e32 v[5:6], v3 s_cmp_eq_u32 s8, 12 s_waitcnt lgkmcnt(0) v_cvt_f64_f32_e32 v[7:8], v7 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fma_f64 v[5:6], s[10:11], v[7:8], v[5:6] v_cvt_f32_f64_e32 v3, v[5:6] s_cbranch_scc0 .LBB0_7 s_add_i32 s7, s7, 1 v_add_nc_u32_e32 v4, 16, v4 s_add_u32 s2, s2, 24 s_addc_u32 s3, s3, 0 s_cmp_eq_u32 s7, 3 s_cbranch_scc0 .LBB0_6 v_cmp_gt_i32_e32 vcc_lo, s6, v2 s_and_b32 exec_lo, exec_lo, vcc_lo s_cbranch_execz .LBB0_11 s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_1) v_cvt_f64_f32_e32 v[2:3], v3 s_load_b64 s[0:1], s[0:1], 0x8 v_mad_u64_u32 v[4:5], null, v1, s6, v[0:1] v_ashrrev_i32_e32 v5, 31, v4 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 3, v[4:5] s_waitcnt lgkmcnt(0) v_add_co_u32 v0, vcc_lo, s0, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo global_store_b64 v[0:1], v[2:3], off .LBB0_11: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z17ConvolutionKernelPdS_i .amdhsa_group_segment_fixed_size 64 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 20 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 9 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 
.amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z17ConvolutionKernelPdS_i, .Lfunc_end0-_Z17ConvolutionKernelPdS_i .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .protected Mc .type Mc,@object .section .bss,"aw",@nobits .globl Mc .p2align 4, 0x0 Mc: .zero 72 .size Mc, 72 .type __hip_cuid_,@object .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym Mc .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value .group_segment_fixed_size: 64 .kernarg_segment_align: 8 .kernarg_segment_size: 20 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z17ConvolutionKernelPdS_i .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z17ConvolutionKernelPdS_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 9 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00079a0e_00000000-6_2dconv_with_tiling.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2077: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2077: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z3refv .type _Z3refv, @function _Z3refv: .LFB2071: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 movl $-1, %ebx movl $0, %r13d movl $2, %r9d movl $0, %r12d leaq cref(%rip), %r10 leaq a(%rip), %r11 jmp .L4 .L6: addl $1, %eax cmpl %ecx, %eax je .L14 .L7: cmpl $2, %edx ja .L6 cmpl $2, %eax ja .L6 movl %edx, %r14d sall $12, %r14d addl %eax, %r14d movslq %r14d, %r14 movsd (%r11,%r14,8), %xmm0 mulsd 24(%rsi), %xmm0 addsd (%r10,%rdi,8), %xmm0 movsd %xmm0, (%r10,%rdi,8) jmp .L6 .L14: addq $24, %rsi addl $1, %edx cmpl %r9d, %edx je .L8 .L5: movl %r8d, %eax jmp .L7 .L8: addl $1, %ecx addl $1, %r8d cmpl $4098, %ecx je .L15 .L9: leaq b(%rip), %rsi movl %ebx, %edx leal 0(%rbp,%rcx), %edi movslq %edi, %rdi jmp .L5 .L15: addl $1, %r12d addl $1, %r9d addl $4096, %r13d addl $1, %ebx cmpl $4096, %r12d je .L3 .L4: movl $-1, %r8d movl $2, %ecx leal -2(%r13), %ebp jmp .L9 .L3: popq %rbx .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2071: .size _Z3refv, .-_Z3refv .globl _Z4initv .type _Z4initv, @function _Z4initv: .LFB2072: .cfi_startproc endbr64 leaq a(%rip), %rdi movl $4096, %ecx movl $0, %esi .L17: movl %esi, %eax movq %rdi, %rdx .L18: pxor %xmm0, %xmm0 cvtsi2sdl 
%eax, %xmm0 movsd %xmm0, (%rdx) addl $1, %eax addq $8, %rdx cmpl %eax, %ecx jne .L18 addl $1, %esi addq $32768, %rdi addl $1, %ecx cmpl $4096, %esi jne .L17 movq $0x000000000, b(%rip) movsd .LC1(%rip), %xmm1 movsd %xmm1, 8+b(%rip) movsd .LC2(%rip), %xmm0 movsd %xmm0, 16+b(%rip) movsd %xmm1, 24+b(%rip) movsd %xmm0, 32+b(%rip) movsd .LC3(%rip), %xmm1 movsd %xmm1, 40+b(%rip) movsd %xmm0, 48+b(%rip) movsd %xmm1, 56+b(%rip) movq .LC4(%rip), %rax movq %rax, 64+b(%rip) ret .cfi_endproc .LFE2072: .size _Z4initv, .-_Z4initv .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC7: .string "%d Diffs found over threshold %f; Max Diff = %f\n" .align 8 .LC8: .string "No differences found between reference and test versions\n" .text .globl _Z7compareiPdS_ .type _Z7compareiPdS_, @function _Z7compareiPdS_: .LFB2073: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 testl %edi, %edi jle .L23 movslq %edi, %rdi leaq 0(,%rdi,8), %rcx movl $0, %eax movl $0, %edi pxor %xmm1, %xmm1 movapd %xmm1, %xmm3 movq .LC5(%rip), %xmm4 movsd .LC6(%rip), %xmm2 jmp .L28 .L36: xorpd %xmm4, %xmm0 jmp .L24 .L26: addq $8, %rax cmpq %rax, %rcx je .L35 .L28: movsd (%rsi,%rax), %xmm0 subsd (%rdx,%rax), %xmm0 comisd %xmm0, %xmm3 ja .L36 .L24: comisd %xmm2, %xmm0 jbe .L26 addl $1, %edi maxsd %xmm1, %xmm0 movapd %xmm0, %xmm1 jmp .L26 .L35: testl %edi, %edi jle .L23 movsd .LC6(%rip), %xmm0 movl %edi, %edx leaq .LC7(%rip), %rsi movl $2, %edi movl $2, %eax call __printf_chk@PLT .L22: addq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L23: .cfi_restore_state leaq .LC8(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L22 .cfi_endproc .LFE2073: .size _Z7compareiPdS_, .-_Z7compareiPdS_ .section .rodata.str1.8 .align 8 .LC9: .string "Error return from gettimeofday: %d" .text .globl _Z7rtclockv .type _Z7rtclockv, @function _Z7rtclockv: .LFB2074: .cfi_startproc endbr64 subq $56, %rsp .cfi_def_cfa_offset 64 movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax leaq 8(%rsp), %rsi leaq 
16(%rsp), %rdi call gettimeofday@PLT testl %eax, %eax jne .L41 .L38: pxor %xmm0, %xmm0 cvtsi2sdq 24(%rsp), %xmm0 mulsd .LC10(%rip), %xmm0 pxor %xmm1, %xmm1 cvtsi2sdq 16(%rsp), %xmm1 addsd %xmm1, %xmm0 movq 40(%rsp), %rax subq %fs:40, %rax jne .L42 addq $56, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L41: .cfi_restore_state movl %eax, %edx leaq .LC9(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L38 .L42: call __stack_chk_fail@PLT .cfi_endproc .LFE2074: .size _Z7rtclockv, .-_Z7rtclockv .globl _Z40__device_stub__Z17ConvolutionKernelPdS_iPdS_i .type _Z40__device_stub__Z17ConvolutionKernelPdS_iPdS_i, @function _Z40__device_stub__Z17ConvolutionKernelPdS_iPdS_i: .LFB2099: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L47 .L43: movq 120(%rsp), %rax subq %fs:40, %rax jne .L48 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L47: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z17ConvolutionKernelPdS_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L43 .L48: call __stack_chk_fail@PLT .cfi_endproc .LFE2099: .size _Z40__device_stub__Z17ConvolutionKernelPdS_iPdS_i, .-_Z40__device_stub__Z17ConvolutionKernelPdS_iPdS_i .globl _Z17ConvolutionKernelPdS_i .type _Z17ConvolutionKernelPdS_i, @function _Z17ConvolutionKernelPdS_i: .LFB2100: .cfi_startproc endbr64 subq 
$8, %rsp .cfi_def_cfa_offset 16 call _Z40__device_stub__Z17ConvolutionKernelPdS_iPdS_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2100: .size _Z17ConvolutionKernelPdS_i, .-_Z17ConvolutionKernelPdS_i .section .rodata.str1.1,"aMS",@progbits,1 .LC11: .string "dev_prop.totalConstMem = %lu\n" .LC12: .string "Input Size = %dx%d\n" .LC13: .string "Mask size = %dx%d\n" .section .rodata.str1.8 .align 8 .LC16: .string "Seq: Approx GFLOPS: %.6f ; Time = %.6f sec; \n" .section .rodata.str1.1 .LC17: .string "Error return for test_kernel\n" .section .rodata.str1.8 .align 8 .LC18: .string "GPU: Approx GFLOPS: %.6f ; Time = %.6f sec; \n" .align 8 .LC19: .string "Correctness Check for GPU solution:\n" .section .rodata.str1.1 .LC20: .string "Correct!\n" .text .globl main .type main, @function main: .LFB2070: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 subq $1104, %rsp .cfi_def_cfa_offset 1120 movq %fs:40, %rax movq %rax, 1096(%rsp) xorl %eax, %eax leaq 64(%rsp), %rdi movl $0, %esi call cudaGetDeviceProperties_v2@PLT movq 416(%rsp), %rdx leaq .LC11(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $4, 40(%rsp) movl $4, 44(%rsp) movl $1, 48(%rsp) movl $1024, 52(%rsp) movl $1024, 56(%rsp) movl $1, 60(%rsp) movl $4096, %ecx movl $4096, %edx leaq .LC12(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $3, %ecx movl $3, %edx leaq .LC13(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT call _Z4initv call _Z7rtclockv movsd %xmm0, 8(%rsp) call _Z3refv call _Z7rtclockv movapd %xmm0, %xmm1 subsd 8(%rsp), %xmm1 movsd .LC14(%rip), %xmm0 divsd %xmm1, %xmm0 divsd .LC15(%rip), %xmm0 leaq .LC16(%rip), %rsi movl $2, %edi movl $2, %eax call __printf_chk@PLT leaq 16(%rsp), %rdi movl $134217728, %esi call cudaMalloc@PLT leaq 24(%rsp), %rdi movl $72, %esi call cudaMalloc@PLT leaq 32(%rsp), %rdi movl $134217728, %esi call cudaMalloc@PLT movl $1, %r8d movl $0, %ecx movl $72, %edx leaq b(%rip), %rbx movq %rbx, %rsi 
leaq _ZL2Mc(%rip), %rdi call cudaMemcpyToSymbol@PLT movl $1, %ecx movl $134217728, %edx leaq a(%rip), %rsi movq 16(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movl $72, %edx movq %rbx, %rsi movq 24(%rsp), %rdi call cudaMemcpy@PLT call _Z7rtclockv movsd %xmm0, 8(%rsp) movl 60(%rsp), %ecx movl $0, %r9d movl $0, %r8d movq 52(%rsp), %rdx movq 40(%rsp), %rdi movl 48(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L57 .L52: call cudaDeviceSynchronize@PLT testl %eax, %eax je .L53 leaq .LC17(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT .L54: movq 1096(%rsp), %rax subq %fs:40, %rax jne .L58 movl $0, %eax addq $1104, %rsp .cfi_remember_state .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 ret .L57: .cfi_restore_state movl $4096, %edx movq 32(%rsp), %rsi movq 16(%rsp), %rdi call _Z40__device_stub__Z17ConvolutionKernelPdS_iPdS_i jmp .L52 .L53: call _Z7rtclockv subsd 8(%rsp), %xmm0 movsd %xmm0, 8(%rsp) movl $2, %ecx movl $134217728, %edx movq 32(%rsp), %rsi leaq c(%rip), %rdi call cudaMemcpy@PLT movq 16(%rsp), %rdi call cudaFree@PLT movq 24(%rsp), %rdi call cudaFree@PLT movq 32(%rsp), %rdi call cudaFree@PLT movsd .LC14(%rip), %xmm0 movsd 8(%rsp), %xmm1 divsd %xmm1, %xmm0 divsd .LC15(%rip), %xmm0 leaq .LC18(%rip), %rsi movl $2, %edi movl $2, %eax call __printf_chk@PLT leaq .LC19(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq .LC20(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L54 .L58: call __stack_chk_fail@PLT .cfi_endproc .LFE2070: .size main, .-main .section .rodata.str1.1 .LC21: .string "_Z17ConvolutionKernelPdS_i" .LC22: .string "Mc" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2102: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 
.cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC21(%rip), %rdx movq %rdx, %rcx leaq _Z17ConvolutionKernelPdS_i(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $1 .cfi_def_cfa_offset 32 movl $72, %r9d movl $0, %r8d leaq .LC22(%rip), %rdx movq %rdx, %rcx leaq _ZL2Mc(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2102: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .globl cref .bss .align 32 .type cref, @object .size cref, 134217728 cref: .zero 134217728 .globl c .align 32 .type c, @object .size c, 134217728 c: .zero 134217728 .globl b .align 32 .type b, @object .size b, 72 b: .zero 72 .globl a .align 32 .type a, @object .size a, 134217728 a: .zero 134217728 .local _ZL2Mc .comm _ZL2Mc,72,32 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC1: .long 0 .long 1072693248 .align 8 .LC2: .long 0 .long 1073741824 .align 8 .LC3: .long 0 .long 1074266112 .align 8 .LC4: .long 0 .long 1074790400 .section .rodata.cst16,"aM",@progbits,16 .align 16 .LC5: .long 0 .long -2147483648 .long 0 .long 0 .section .rodata.cst8 .align 8 .LC6: .long 1202590843 .long 1065646817 .align 8 .LC10: .long -1598689907 .long 1051772663 .align 8 .LC14: .long 0 .long 1101135872 .align 8 .LC15: .long 0 .long 1104006501 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 
13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "2dconv_with_tiling.hip" .globl _Z32__device_stub__ConvolutionKernelPdS_i # -- Begin function _Z32__device_stub__ConvolutionKernelPdS_i .p2align 4, 0x90 .type _Z32__device_stub__ConvolutionKernelPdS_i,@function _Z32__device_stub__ConvolutionKernelPdS_i: # @_Z32__device_stub__ConvolutionKernelPdS_i .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z17ConvolutionKernelPdS_i, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z32__device_stub__ConvolutionKernelPdS_i, .Lfunc_end0-_Z32__device_stub__ConvolutionKernelPdS_i .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function main .LCPI1_0: .quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7 .LCPI1_1: .quad 0x41a2000000000000 # double 150994944 .LCPI1_2: .quad 0x41cdcd6500000000 # double 1.0E+9 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $1624, %rsp # imm = 0x658 .cfi_def_cfa_offset 1680 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 leaq 152(%rsp), %rdi xorl %ebx, %ebx xorl %esi, %esi callq hipGetDevicePropertiesR0600 movq 504(%rsp), %rsi movl $.L.str, %edi xorl %eax, %eax callq printf 
movl $.L.str.1, %edi movl $4096, %esi # imm = 0x1000 movl $4096, %edx # imm = 0x1000 xorl %eax, %eax callq printf movl $.L.str.2, %edi movl $3, %esi movl $3, %edx xorl %eax, %eax callq printf movl $a, %eax .p2align 4, 0x90 .LBB1_1: # %.preheader19.i # =>This Loop Header: Depth=1 # Child Loop BB1_2 Depth 2 xorl %ecx, %ecx .p2align 4, 0x90 .LBB1_2: # Parent Loop BB1_1 Depth=1 # => This Inner Loop Header: Depth=2 leal (%rbx,%rcx), %edx xorps %xmm0, %xmm0 cvtsi2sd %edx, %xmm0 movsd %xmm0, (%rax,%rcx,8) incq %rcx cmpq $4096, %rcx # imm = 0x1000 jne .LBB1_2 # %bb.3: # in Loop: Header=BB1_1 Depth=1 incq %rbx addq $32768, %rax # imm = 0x8000 cmpq $4096, %rbx # imm = 0x1000 jne .LBB1_1 # %bb.4: # %.preheader.i.preheader movl $b, %eax xorl %ecx, %ecx .p2align 4, 0x90 .LBB1_5: # %.preheader.i # =>This Loop Header: Depth=1 # Child Loop BB1_6 Depth 2 xorl %edx, %edx .p2align 4, 0x90 .LBB1_6: # Parent Loop BB1_5 Depth=1 # => This Inner Loop Header: Depth=2 leal (%rcx,%rdx), %esi xorps %xmm0, %xmm0 cvtsi2sd %esi, %xmm0 movsd %xmm0, (%rax,%rdx,8) incq %rdx cmpq $3, %rdx jne .LBB1_6 # %bb.7: # in Loop: Header=BB1_5 Depth=1 incq %rcx addq $24, %rax cmpq $3, %rcx jne .LBB1_5 # %bb.8: # %_Z4initv.exit leaq 16(%rsp), %rdi leaq 48(%rsp), %rsi callq gettimeofday testl %eax, %eax je .LBB1_10 # %bb.9: movl $.L.str.10, %edi movl %eax, %esi xorl %eax, %eax callq printf .LBB1_10: # %_Z7rtclockv.exit movq 16(%rsp), %rax cvtsi2sdq 24(%rsp), %xmm1 mulsd .LCPI1_0(%rip), %xmm1 leaq a-32776(%rip), %rcx xorl %edx, %edx jmp .LBB1_11 .p2align 4, 0x90 .LBB1_20: # in Loop: Header=BB1_11 Depth=1 incq %rdx addq $32768, %rcx # imm = 0x8000 cmpq $4096, %rdx # imm = 0x1000 je .LBB1_21 .LBB1_11: # %.preheader31.i # =>This Loop Header: Depth=1 # Child Loop BB1_12 Depth 2 # Child Loop BB1_13 Depth 3 # Child Loop BB1_14 Depth 4 leaq -1(%rdx), %rsi movq %rdx, %rdi shlq $12, %rdi movq $-1, %r8 movq %rcx, %r9 xorl %r10d, %r10d jmp .LBB1_12 .p2align 4, 0x90 .LBB1_19: # in Loop: Header=BB1_12 Depth=2 incq %r10 addq 
$8, %r9 incq %r8 cmpq $4096, %r10 # imm = 0x1000 je .LBB1_20 .LBB1_12: # Parent Loop BB1_11 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB1_13 Depth 3 # Child Loop BB1_14 Depth 4 leaq (%r10,%rdi), %r11 movq %r9, %rbx xorl %r14d, %r14d jmp .LBB1_13 .p2align 4, 0x90 .LBB1_18: # in Loop: Header=BB1_13 Depth=3 incq %r14 addq $32768, %rbx # imm = 0x8000 cmpq $3, %r14 je .LBB1_19 .LBB1_13: # %.preheader.i24 # Parent Loop BB1_11 Depth=1 # Parent Loop BB1_12 Depth=2 # => This Loop Header: Depth=3 # Child Loop BB1_14 Depth 4 leal (%rsi,%r14), %ebp leaq (%r14,%r14,2), %r15 xorl %r12d, %r12d jmp .LBB1_14 .p2align 4, 0x90 .LBB1_17: # in Loop: Header=BB1_14 Depth=4 incq %r12 cmpq $3, %r12 je .LBB1_18 .LBB1_14: # Parent Loop BB1_11 Depth=1 # Parent Loop BB1_12 Depth=2 # Parent Loop BB1_13 Depth=3 # => This Inner Loop Header: Depth=4 cmpl $2, %ebp ja .LBB1_17 # %bb.15: # in Loop: Header=BB1_14 Depth=4 leal (%r8,%r12), %r13d cmpl $2, %r13d ja .LBB1_17 # %bb.16: # in Loop: Header=BB1_14 Depth=4 movsd (%rbx,%r12,8), %xmm0 # xmm0 = mem[0],zero mulsd b+24(,%r15,8), %xmm0 addsd cref(,%r11,8), %xmm0 movsd %xmm0, cref(,%r11,8) jmp .LBB1_17 .LBB1_21: # %_Z3refv.exit xorps %xmm0, %xmm0 cvtsi2sd %rax, %xmm0 addsd %xmm0, %xmm1 movsd %xmm1, 8(%rsp) # 8-byte Spill leaq 16(%rsp), %rdi leaq 48(%rsp), %rsi callq gettimeofday testl %eax, %eax je .LBB1_23 # %bb.22: movl $.L.str.10, %edi movl %eax, %esi xorl %eax, %eax callq printf .LBB1_23: # %_Z7rtclockv.exit29 xorps %xmm0, %xmm0 cvtsi2sdq 16(%rsp), %xmm0 xorps %xmm1, %xmm1 cvtsi2sdq 24(%rsp), %xmm1 mulsd .LCPI1_0(%rip), %xmm1 addsd %xmm0, %xmm1 subsd 8(%rsp), %xmm1 # 8-byte Folded Reload movsd .LCPI1_1(%rip), %xmm0 # xmm0 = mem[0],zero divsd %xmm1, %xmm0 divsd .LCPI1_2(%rip), %xmm0 movl $.L.str.3, %edi movb $2, %al callq printf leaq 72(%rsp), %rdi movl $134217728, %esi # imm = 0x8000000 callq hipMalloc leaq 88(%rsp), %rdi movl $72, %esi callq hipMalloc leaq 64(%rsp), %rdi movl $134217728, %esi # imm = 0x8000000 callq hipMalloc movl $Mc, 
%edi movl $b, %esi movl $72, %edx xorl %ecx, %ecx movl $1, %r8d callq hipMemcpyToSymbol movq 72(%rsp), %rdi movl $a, %esi movl $134217728, %edx # imm = 0x8000000 movl $1, %ecx callq hipMemcpy movq 88(%rsp), %rdi movl $b, %esi movl $72, %edx movl $1, %ecx callq hipMemcpy leaq 16(%rsp), %rdi leaq 48(%rsp), %rsi callq gettimeofday testl %eax, %eax je .LBB1_25 # %bb.24: movl $.L.str.10, %edi movl %eax, %esi xorl %eax, %eax callq printf .LBB1_25: # %_Z7rtclockv.exit31 xorps %xmm0, %xmm0 cvtsi2sdq 16(%rsp), %xmm0 movsd %xmm0, 96(%rsp) # 8-byte Spill xorps %xmm0, %xmm0 cvtsi2sdq 24(%rsp), %xmm0 movsd %xmm0, 8(%rsp) # 8-byte Spill movabsq $17179869188, %rdi # imm = 0x400000004 movabsq $4398046512128, %rdx # imm = 0x40000000400 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_27 # %bb.26: movq 72(%rsp), %rax movq 64(%rsp), %rcx movq %rax, 144(%rsp) movq %rcx, 136(%rsp) movl $4096, 84(%rsp) # imm = 0x1000 leaq 144(%rsp), %rax movq %rax, 16(%rsp) leaq 136(%rsp), %rax movq %rax, 24(%rsp) leaq 84(%rsp), %rax movq %rax, 32(%rsp) leaq 48(%rsp), %rdi leaq 120(%rsp), %rsi leaq 112(%rsp), %rdx leaq 104(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 120(%rsp), %rcx movl 128(%rsp), %r8d leaq 16(%rsp), %r9 movl $_Z17ConvolutionKernelPdS_i, %edi pushq 104(%rsp) .cfi_adjust_cfa_offset 8 pushq 120(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_27: callq hipDeviceSynchronize movl $.Lstr.2, %edi testl %eax, %eax jne .LBB1_31 # %bb.28: movsd 8(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero mulsd .LCPI1_0(%rip), %xmm0 addsd 96(%rsp), %xmm0 # 8-byte Folded Reload movsd %xmm0, 8(%rsp) # 8-byte Spill leaq 16(%rsp), %rdi leaq 48(%rsp), %rsi callq gettimeofday testl %eax, %eax je .LBB1_30 # %bb.29: movl $.L.str.10, %edi movl %eax, %esi xorl %eax, %eax callq printf .LBB1_30: # %_Z7rtclockv.exit33 xorps %xmm0, %xmm0 cvtsi2sdq 
16(%rsp), %xmm0 cvtsi2sdq 24(%rsp), %xmm1 mulsd .LCPI1_0(%rip), %xmm1 addsd %xmm0, %xmm1 subsd 8(%rsp), %xmm1 # 8-byte Folded Reload movsd %xmm1, 8(%rsp) # 8-byte Spill movq 64(%rsp), %rsi movl $c, %edi movl $134217728, %edx # imm = 0x8000000 movl $2, %ecx callq hipMemcpy movq 72(%rsp), %rdi callq hipFree movq 88(%rsp), %rdi callq hipFree movq 64(%rsp), %rdi callq hipFree movsd .LCPI1_1(%rip), %xmm0 # xmm0 = mem[0],zero movsd 8(%rsp), %xmm1 # 8-byte Reload # xmm1 = mem[0],zero divsd %xmm1, %xmm0 divsd .LCPI1_2(%rip), %xmm0 movl $.L.str.5, %edi movb $2, %al callq printf movl $.Lstr, %edi callq puts@PLT movl $.Lstr.1, %edi .LBB1_31: callq puts@PLT xorl %eax, %eax addq $1624, %rsp # imm = 0x658 .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .globl _Z4initv # -- Begin function _Z4initv .p2align 4, 0x90 .type _Z4initv,@function _Z4initv: # @_Z4initv .cfi_startproc # %bb.0: movl $a, %eax xorl %ecx, %ecx .p2align 4, 0x90 .LBB2_1: # %.preheader19 # =>This Loop Header: Depth=1 # Child Loop BB2_2 Depth 2 xorl %edx, %edx .p2align 4, 0x90 .LBB2_2: # Parent Loop BB2_1 Depth=1 # => This Inner Loop Header: Depth=2 leal (%rcx,%rdx), %esi xorps %xmm0, %xmm0 cvtsi2sd %esi, %xmm0 movsd %xmm0, (%rax,%rdx,8) incq %rdx cmpq $4096, %rdx # imm = 0x1000 jne .LBB2_2 # %bb.3: # in Loop: Header=BB2_1 Depth=1 incq %rcx addq $32768, %rax # imm = 0x8000 cmpq $4096, %rcx # imm = 0x1000 jne .LBB2_1 # %bb.4: # %.preheader.preheader movl $b, %eax xorl %ecx, %ecx .p2align 4, 0x90 .LBB2_5: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB2_6 Depth 2 xorl %edx, %edx .p2align 4, 0x90 .LBB2_6: # Parent Loop BB2_5 Depth=1 # => This Inner Loop Header: Depth=2 leal (%rcx,%rdx), %esi xorps %xmm0, %xmm0 cvtsi2sd %esi, %xmm0 movsd %xmm0, (%rax,%rdx,8) 
incq %rdx cmpq $3, %rdx jne .LBB2_6 # %bb.7: # in Loop: Header=BB2_5 Depth=1 incq %rcx addq $24, %rax cmpq $3, %rcx jne .LBB2_5 # %bb.8: retq .Lfunc_end2: .size _Z4initv, .Lfunc_end2-_Z4initv .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function _Z7rtclockv .LCPI3_0: .quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7 .text .globl _Z7rtclockv .p2align 4, 0x90 .type _Z7rtclockv,@function _Z7rtclockv: # @_Z7rtclockv .cfi_startproc # %bb.0: subq $24, %rsp .cfi_def_cfa_offset 32 movq %rsp, %rdi leaq 16(%rsp), %rsi callq gettimeofday testl %eax, %eax je .LBB3_2 # %bb.1: movl $.L.str.10, %edi movl %eax, %esi xorl %eax, %eax callq printf .LBB3_2: cvtsi2sdq (%rsp), %xmm1 cvtsi2sdq 8(%rsp), %xmm0 mulsd .LCPI3_0(%rip), %xmm0 addsd %xmm1, %xmm0 addq $24, %rsp .cfi_def_cfa_offset 8 retq .Lfunc_end3: .size _Z7rtclockv, .Lfunc_end3-_Z7rtclockv .cfi_endproc # -- End function .globl _Z3refv # -- Begin function _Z3refv .p2align 4, 0x90 .type _Z3refv,@function _Z3refv: # @_Z3refv .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r12 .cfi_def_cfa_offset 40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset %rbx, -48 .cfi_offset %r12, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 leaq a-32776(%rip), %rax xorl %ecx, %ecx jmp .LBB4_1 .p2align 4, 0x90 .LBB4_10: # in Loop: Header=BB4_1 Depth=1 incq %rcx addq $32768, %rax # imm = 0x8000 cmpq $4096, %rcx # imm = 0x1000 je .LBB4_11 .LBB4_1: # %.preheader31 # =>This Loop Header: Depth=1 # Child Loop BB4_2 Depth 2 # Child Loop BB4_3 Depth 3 # Child Loop BB4_4 Depth 4 leaq -1(%rcx), %rdx movq %rcx, %rsi shlq $12, %rsi movq $-1, %rdi movq %rax, %r8 xorl %r9d, %r9d jmp .LBB4_2 .p2align 4, 0x90 .LBB4_9: # in Loop: Header=BB4_2 Depth=2 incq %r9 addq $8, %r8 incq %rdi cmpq $4096, %r9 # imm = 0x1000 je .LBB4_10 .LBB4_2: # Parent Loop BB4_1 Depth=1 # => This Loop Header: Depth=2 # Child Loop 
BB4_3 Depth 3 # Child Loop BB4_4 Depth 4 leaq (%r9,%rsi), %r10 movq %r8, %r11 xorl %ebx, %ebx jmp .LBB4_3 .p2align 4, 0x90 .LBB4_8: # in Loop: Header=BB4_3 Depth=3 incq %rbx addq $32768, %r11 # imm = 0x8000 cmpq $3, %rbx je .LBB4_9 .LBB4_3: # %.preheader # Parent Loop BB4_1 Depth=1 # Parent Loop BB4_2 Depth=2 # => This Loop Header: Depth=3 # Child Loop BB4_4 Depth 4 leal (%rdx,%rbx), %ebp leaq (%rbx,%rbx,2), %r14 xorl %r15d, %r15d jmp .LBB4_4 .p2align 4, 0x90 .LBB4_7: # in Loop: Header=BB4_4 Depth=4 incq %r15 cmpq $3, %r15 je .LBB4_8 .LBB4_4: # Parent Loop BB4_1 Depth=1 # Parent Loop BB4_2 Depth=2 # Parent Loop BB4_3 Depth=3 # => This Inner Loop Header: Depth=4 cmpl $2, %ebp ja .LBB4_7 # %bb.5: # in Loop: Header=BB4_4 Depth=4 leal (%rdi,%r15), %r12d cmpl $2, %r12d ja .LBB4_7 # %bb.6: # in Loop: Header=BB4_4 Depth=4 movsd (%r11,%r15,8), %xmm0 # xmm0 = mem[0],zero mulsd b+24(,%r14,8), %xmm0 addsd cref(,%r10,8), %xmm0 movsd %xmm0, cref(,%r10,8) jmp .LBB4_7 .LBB4_11: popq %rbx .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end4: .size _Z3refv, .Lfunc_end4-_Z3refv .cfi_endproc # -- End function .section .rodata.cst16,"aM",@progbits,16 .p2align 4, 0x0 # -- Begin function _Z7compareiPdS_ .LCPI5_0: .quad 0x8000000000000000 # double -0 .quad 0x8000000000000000 # double -0 .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 .LCPI5_1: .quad 0x3f847ae147ae147b # double 0.01 .text .globl _Z7compareiPdS_ .p2align 4, 0x90 .type _Z7compareiPdS_,@function _Z7compareiPdS_: # @_Z7compareiPdS_ .cfi_startproc # %bb.0: testl %edi, %edi jle .LBB5_1 # %bb.2: # %.lr.ph.preheader movq %rsi, %rax movl %edi, %ecx xorpd %xmm3, %xmm3 xorl %edi, %edi movapd .LCPI5_0(%rip), %xmm0 # xmm0 = [-0.0E+0,-0.0E+0] movsd .LCPI5_1(%rip), %xmm2 # xmm2 = mem[0],zero xorl %esi, %esi jmp .LBB5_3 .p2align 4, 0x90 .LBB5_5: # in Loop: Header=BB5_3 Depth=1 movapd %xmm3, %xmm1 .LBB5_6: # in 
Loop: Header=BB5_3 Depth=1 incq %rdi movapd %xmm1, %xmm3 cmpq %rdi, %rcx je .LBB5_7 .LBB5_3: # %.lr.ph # =>This Inner Loop Header: Depth=1 movsd (%rax,%rdi,8), %xmm4 # xmm4 = mem[0],zero subsd (%rdx,%rdi,8), %xmm4 movapd %xmm4, %xmm1 xorpd %xmm0, %xmm1 maxsd %xmm4, %xmm1 ucomisd %xmm2, %xmm1 jbe .LBB5_5 # %bb.4: # in Loop: Header=BB5_3 Depth=1 incl %esi ucomisd %xmm3, %xmm1 jbe .LBB5_5 jmp .LBB5_6 .LBB5_1: xorl %esi, %esi xorpd %xmm1, %xmm1 .LBB5_7: # %._crit_edge testl %esi, %esi jle .LBB5_9 # %bb.8: movsd .LCPI5_1(%rip), %xmm0 # xmm0 = mem[0],zero movl $.L.str.8, %edi movb $2, %al jmp printf # TAILCALL .LBB5_9: movl $.Lstr.3, %edi jmp puts@PLT # TAILCALL .Lfunc_end5: .size _Z7compareiPdS_, .Lfunc_end5-_Z7compareiPdS_ .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB6_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB6_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z17ConvolutionKernelPdS_i, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $0, 8(%rsp) movl $1, (%rsp) movl $Mc, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movl $72, %r9d movq %rbx, %rdi xorl %r8d, %r8d callq __hipRegisterVar movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end6: .size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq 
__hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB7_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB7_2: retq .Lfunc_end7: .size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor .cfi_endproc # -- End function .type Mc,@object # @Mc .local Mc .comm Mc,72,16 .type _Z17ConvolutionKernelPdS_i,@object # @_Z17ConvolutionKernelPdS_i .section .rodata,"a",@progbits .globl _Z17ConvolutionKernelPdS_i .p2align 3, 0x0 _Z17ConvolutionKernelPdS_i: .quad _Z32__device_stub__ConvolutionKernelPdS_i .size _Z17ConvolutionKernelPdS_i, 8 .type a,@object # @a .bss .globl a .p2align 4, 0x0 a: .zero 134217728 .size a, 134217728 .type b,@object # @b .globl b .p2align 4, 0x0 b: .zero 72 .size b, 72 .type c,@object # @c .globl c .p2align 4, 0x0 c: .zero 134217728 .size c, 134217728 .type cref,@object # @cref .globl cref .p2align 4, 0x0 cref: .zero 134217728 .size cref, 134217728 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "dev_prop.totalConstMem = %lu\n" .size .L.str, 30 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "Input Size = %dx%d\n" .size .L.str.1, 20 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "Mask size = %dx%d\n" .size .L.str.2, 20 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz "Seq: Approx GFLOPS: %.6f .size .L.str.3, 46 .type .L.str.5,@object # @.str.5 .L.str.5: .asciz "GPU: Approx GFLOPS: %.6f .size .L.str.5, 46 .type .L.str.8,@object # @.str.8 .L.str.8: .asciz "%d Diffs found over threshold %f .size .L.str.8, 49 .type .L.str.10,@object # @.str.10 .L.str.10: .asciz "Error return from gettimeofday: %d" .size .L.str.10, 35 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z17ConvolutionKernelPdS_i" .size .L__unnamed_1, 27 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "Mc" .size .L__unnamed_2, 3 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 
0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "Correctness Check for GPU solution:" .size .Lstr, 36 .type .Lstr.1,@object # @str.1 .Lstr.1: .asciz "Correct!" .size .Lstr.1, 9 .type .Lstr.2,@object # @str.2 .Lstr.2: .asciz "Error return for test_kernel" .size .Lstr.2, 29 .type .Lstr.3,@object # @str.3 .Lstr.3: .asciz "No differences found between reference and test versions" .size .Lstr.3, 57 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z32__device_stub__ConvolutionKernelPdS_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym Mc .addrsig_sym _Z17ConvolutionKernelPdS_i .addrsig_sym a .addrsig_sym b .addrsig_sym c .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include "includes.h" __global__ void kernel_dot_product(const double * vec1, const double * vec2, int numElements, double * answer) { extern __shared__ double products[]; // one element per thread int i = threadIdx.x; // numElements assumed to fit into one block products[i] = vec1[i] * vec2[i]; __syncthreads(); if (i == 0) { double sum = 0; for (int j = 0; j < numElements; ++j) { sum += products[j]; } *answer = sum; } }
code for sm_80 Function : _Z18kernel_dot_productPKdS0_iPd .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R9, SR_TID.X ; /* 0x0000000000097919 */ /* 0x000e220000002100 */ /*0020*/ IMAD.MOV.U32 R4, RZ, RZ, 0x8 ; /* 0x00000008ff047424 */ /* 0x000fe200078e00ff */ /*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */ /* 0x000fc60000000a00 */ /*0040*/ IMAD.WIDE R2, R9, R4, c[0x0][0x160] ; /* 0x0000580009027625 */ /* 0x001fc800078e0204 */ /*0050*/ IMAD.WIDE R4, R9.reuse, R4, c[0x0][0x168] ; /* 0x00005a0009047625 */ /* 0x040fe400078e0204 */ /*0060*/ LDG.E.64 R2, [R2.64] ; /* 0x0000000602027981 */ /* 0x000ea8000c1e1b00 */ /*0070*/ LDG.E.64 R4, [R4.64] ; /* 0x0000000604047981 */ /* 0x000ea2000c1e1b00 */ /*0080*/ ISETP.NE.AND P0, PT, R9, RZ, PT ; /* 0x000000ff0900720c */ /* 0x000fe20003f05270 */ /*0090*/ DMUL R6, R4, R2 ; /* 0x0000000204067228 */ /* 0x004e0e0000000000 */ /*00a0*/ STS.64 [R9.X8], R6 ; /* 0x0000000609007388 */ /* 0x0011e80000008a00 */ /*00b0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*00c0*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*00d0*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff027624 */ /* 0x001fe200078e00ff */ /*00e0*/ CS2R R20, SRZ ; /* 0x0000000000147805 */ /* 0x000fc8000001ff00 */ /*00f0*/ ISETP.GE.AND P0, PT, R2, 0x1, PT ; /* 0x000000010200780c */ /* 0x000fda0003f06270 */ /*0100*/ @!P0 BRA 0x660 ; /* 0x0000055000008947 */ /* 0x000fea0003800000 */ /*0110*/ IADD3 R0, R2.reuse, -0x1, RZ ; /* 0xffffffff02007810 */ /* 0x040fe20007ffe0ff */ /*0120*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */ /* 0x000fe20008000000 */ /*0130*/ LOP3.LUT R2, R2, 0x3, RZ, 0xc0, !PT ; /* 0x0000000302027812 */ /* 0x000fe200078ec0ff */ /*0140*/ CS2R R20, SRZ ; /* 0x0000000000147805 */ /* 0x000fe2000001ff00 */ /*0150*/ 
ISETP.GE.U32.AND P0, PT, R0, 0x3, PT ; /* 0x000000030000780c */ /* 0x000fda0003f06070 */ /*0160*/ @!P0 BRA 0x5d0 ; /* 0x0000046000008947 */ /* 0x000fea0003800000 */ /*0170*/ IADD3 R0, -R2, c[0x0][0x170], RZ ; /* 0x00005c0002007a10 */ /* 0x000fe20007ffe1ff */ /*0180*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */ /* 0x000fe20008000000 */ /*0190*/ CS2R R20, SRZ ; /* 0x0000000000147805 */ /* 0x000fe2000001ff00 */ /*01a0*/ UMOV UR5, URZ ; /* 0x0000003f00057c82 */ /* 0x000fe20008000000 */ /*01b0*/ ISETP.GT.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */ /* 0x000fda0003f04270 */ /*01c0*/ @!P0 BRA 0x520 ; /* 0x0000035000008947 */ /* 0x000fea0003800000 */ /*01d0*/ ISETP.GT.AND P1, PT, R0, 0xc, PT ; /* 0x0000000c0000780c */ /* 0x000fe40003f24270 */ /*01e0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */ /* 0x000fd60003f0f070 */ /*01f0*/ @!P1 BRA 0x3e0 ; /* 0x000001e000009947 */ /* 0x000fea0003800000 */ /*0200*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*0210*/ LDS.128 R4, [UR5] ; /* 0x00000005ff047984 */ /* 0x001e220008000c00 */ /*0220*/ IADD3 R0, R0, -0x10, RZ ; /* 0xfffffff000007810 */ /* 0x000fe20007ffe0ff */ /*0230*/ UIADD3 UR4, UR4, 0x10, URZ ; /* 0x0000001004047890 */ /* 0x000fe4000fffe03f */ /*0240*/ LDS.128 R8, [UR5+0x10] ; /* 0x00001005ff087984 */ /* 0x002e620008000c00 */ /*0250*/ ISETP.GT.AND P1, PT, R0, 0xc, PT ; /* 0x0000000c0000780c */ /* 0x000fc60003f24270 */ /*0260*/ LDS.128 R12, [UR5+0x20] ; /* 0x00002005ff0c7984 */ /* 0x000ea80008000c00 */ /*0270*/ LDS.128 R16, [UR5+0x30] ; /* 0x00003005ff107984 */ /* 0x000ee20008000c00 */ /*0280*/ DADD R4, R4, R20 ; /* 0x0000000004047229 */ /* 0x001e0c0000000014 */ /*0290*/ DADD R20, R6, R4 ; /* 0x0000000006147229 */ /* 0x0010480000000004 */ /*02a0*/ LDS.128 R4, [UR5+0x40] ; /* 0x00004005ff047984 */ /* 0x001e240008000c00 */ /*02b0*/ DADD R8, R20, R8 ; /* 0x0000000014087229 */ /* 0x002e4c0000000008 */ /*02c0*/ DADD R20, R10, R8 ; /* 0x000000000a147229 */ /* 
0x0022880000000008 */ /*02d0*/ LDS.128 R8, [UR5+0x50] ; /* 0x00005005ff087984 */ /* 0x002e640008000c00 */ /*02e0*/ DADD R12, R20, R12 ; /* 0x00000000140c7229 */ /* 0x004e8c000000000c */ /*02f0*/ DADD R20, R14, R12 ; /* 0x000000000e147229 */ /* 0x0044c8000000000c */ /*0300*/ LDS.128 R12, [UR5+0x60] ; /* 0x00006005ff0c7984 */ /* 0x004ea40008000c00 */ /*0310*/ DADD R16, R20, R16 ; /* 0x0000000014107229 */ /* 0x008ecc0000000010 */ /*0320*/ DADD R20, R18, R16 ; /* 0x0000000012147229 */ /* 0x0086080000000010 */ /*0330*/ LDS.128 R16, [UR5+0x70] ; /* 0x00007005ff107984 */ /* 0x008ee20008000c00 */ /*0340*/ UIADD3 UR5, UR5, 0x80, URZ ; /* 0x0000008005057890 */ /* 0x000fe2000fffe03f */ /*0350*/ DADD R4, R20, R4 ; /* 0x0000000014047229 */ /* 0x001e0c0000000004 */ /*0360*/ DADD R4, R6, R4 ; /* 0x0000000006047229 */ /* 0x001e4c0000000004 */ /*0370*/ DADD R4, R4, R8 ; /* 0x0000000004047229 */ /* 0x002e0c0000000008 */ /*0380*/ DADD R4, R10, R4 ; /* 0x000000000a047229 */ /* 0x001e8c0000000004 */ /*0390*/ DADD R4, R4, R12 ; /* 0x0000000004047229 */ /* 0x004e0c000000000c */ /*03a0*/ DADD R4, R14, R4 ; /* 0x000000000e047229 */ /* 0x001ecc0000000004 */ /*03b0*/ DADD R4, R4, R16 ; /* 0x0000000004047229 */ /* 0x008e0c0000000010 */ /*03c0*/ DADD R20, R18, R4 ; /* 0x0000000012147229 */ /* 0x0010620000000004 */ /*03d0*/ @P1 BRA 0x210 ; /* 0xfffffe3000001947 */ /* 0x000fea000383ffff */ /*03e0*/ ISETP.GT.AND P1, PT, R0, 0x4, PT ; /* 0x000000040000780c */ /* 0x000fda0003f24270 */ /*03f0*/ @!P1 BRA 0x500 ; /* 0x0000010000009947 */ /* 0x000fea0003800000 */ /*0400*/ LDS.128 R4, [UR5] ; /* 0x00000005ff047984 */ /* 0x001e220008000c00 */ /*0410*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe20003f0e170 */ /*0420*/ UIADD3 UR4, UR4, 0x8, URZ ; /* 0x0000000804047890 */ /* 0x000fe2000fffe03f */ /*0430*/ IADD3 R0, R0, -0x8, RZ ; /* 0xfffffff800007810 */ /* 0x000fe20007ffe0ff */ /*0440*/ LDS.128 R8, [UR5+0x10] ; /* 0x00001005ff087984 */ /* 0x000ea80008000c00 */ /*0450*/ 
LDS.128 R12, [UR5+0x20] ; /* 0x00002005ff0c7984 */ /* 0x000ee80008000c00 */ /*0460*/ LDS.128 R16, [UR5+0x30] ; /* 0x00003005ff107984 */ /* 0x000f220008000c00 */ /*0470*/ UIADD3 UR5, UR5, 0x40, URZ ; /* 0x0000004005057890 */ /* 0x000fe2000fffe03f */ /*0480*/ DADD R4, R20, R4 ; /* 0x0000000014047229 */ /* 0x003e0c0000000004 */ /*0490*/ DADD R4, R6, R4 ; /* 0x0000000006047229 */ /* 0x001e8c0000000004 */ /*04a0*/ DADD R4, R4, R8 ; /* 0x0000000004047229 */ /* 0x004e0c0000000008 */ /*04b0*/ DADD R4, R10, R4 ; /* 0x000000000a047229 */ /* 0x001ecc0000000004 */ /*04c0*/ DADD R4, R4, R12 ; /* 0x0000000004047229 */ /* 0x008e0c000000000c */ /*04d0*/ DADD R4, R14, R4 ; /* 0x000000000e047229 */ /* 0x001f0c0000000004 */ /*04e0*/ DADD R4, R4, R16 ; /* 0x0000000004047229 */ /* 0x010e0c0000000010 */ /*04f0*/ DADD R20, R18, R4 ; /* 0x0000000012147229 */ /* 0x0010480000000004 */ /*0500*/ ISETP.NE.OR P0, PT, R0, RZ, P0 ; /* 0x000000ff0000720c */ /* 0x000fda0000705670 */ /*0510*/ @!P0 BRA 0x5d0 ; /* 0x000000b000008947 */ /* 0x000fea0003800000 */ /*0520*/ LDS.128 R8, [UR5] ; /* 0x00000005ff087984 */ /* 0x000ea20008000c00 */ /*0530*/ IADD3 R0, R0, -0x4, RZ ; /* 0xfffffffc00007810 */ /* 0x000fe20007ffe0ff */ /*0540*/ UIADD3 UR4, UR4, 0x4, URZ ; /* 0x0000000404047890 */ /* 0x000fe4000fffe03f */ /*0550*/ LDS.128 R4, [UR5+0x10] ; /* 0x00001005ff047984 */ /* 0x001e220008000c00 */ /*0560*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */ /* 0x000fe20003f05270 */ /*0570*/ UIADD3 UR5, UR5, 0x20, URZ ; /* 0x0000002005057890 */ /* 0x000fe2000fffe03f */ /*0580*/ DADD R8, R8, R20 ; /* 0x0000000008087229 */ /* 0x006e4c0000000014 */ /*0590*/ DADD R8, R10, R8 ; /* 0x000000000a087229 */ /* 0x002e0c0000000008 */ /*05a0*/ DADD R4, R8, R4 ; /* 0x0000000008047229 */ /* 0x001e0c0000000004 */ /*05b0*/ DADD R20, R6, R4 ; /* 0x0000000006147229 */ /* 0x0010640000000004 */ /*05c0*/ @P0 BRA 0x520 ; /* 0xffffff5000000947 */ /* 0x003fea000383ffff */ /*05d0*/ ISETP.NE.AND P0, PT, R2, RZ, PT ; /* 
0x000000ff0200720c */ /* 0x000fda0003f05270 */ /*05e0*/ @!P0 BRA 0x660 ; /* 0x0000007000008947 */ /* 0x000fea0003800000 */ /*05f0*/ USHF.L.U32 UR4, UR4, 0x3, URZ ; /* 0x0000000304047899 */ /* 0x000fd2000800063f */ /*0600*/ LDS.64 R4, [UR4] ; /* 0x00000004ff047984 */ /* 0x001e220008000a00 */ /*0610*/ IADD3 R2, R2, -0x1, RZ ; /* 0xffffffff02027810 */ /* 0x000fe20007ffe0ff */ /*0620*/ UIADD3 UR4, UR4, 0x8, URZ ; /* 0x0000000804047890 */ /* 0x000fc6000fffe03f */ /*0630*/ ISETP.NE.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */ /* 0x000fe20003f05270 */ /*0640*/ DADD R20, R4, R20 ; /* 0x0000000004147229 */ /* 0x0030580000000014 */ /*0650*/ @P0 BRA 0x600 ; /* 0xffffffa000000947 */ /* 0x000fea000383ffff */ /*0660*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff027624 */ /* 0x000fe400078e00ff */ /*0670*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x17c] ; /* 0x00005f00ff037624 */ /* 0x000fca00078e00ff */ /*0680*/ STG.E.64 [R2.64], R20 ; /* 0x0000001402007986 */ /* 0x002fe2000c101b06 */ /*0690*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*06a0*/ BRA 0x6a0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*06b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*06c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*06d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*06e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*06f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0700*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0710*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0720*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0730*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0740*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0750*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0760*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0770*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include "includes.h" __global__ void kernel_dot_product(const double * vec1, const double * vec2, int numElements, double * answer) { extern __shared__ double products[]; // one element per thread int i = threadIdx.x; // numElements assumed to fit into one block products[i] = vec1[i] * vec2[i]; __syncthreads(); if (i == 0) { double sum = 0; for (int j = 0; j < numElements; ++j) { sum += products[j]; } *answer = sum; } }
.file "tmpxft_00015f2f_00000000-6_kernel_dot_product.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z45__device_stub__Z18kernel_dot_productPKdS0_iPdPKdS0_iPd .type _Z45__device_stub__Z18kernel_dot_productPKdS0_iPdPKdS0_iPd, @function _Z45__device_stub__Z18kernel_dot_productPKdS0_iPdPKdS0_iPd: .LFB2051: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movq %rcx, (%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) movq %rsp, %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z18kernel_dot_productPKdS0_iPd(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z45__device_stub__Z18kernel_dot_productPKdS0_iPdPKdS0_iPd, .-_Z45__device_stub__Z18kernel_dot_productPKdS0_iPdPKdS0_iPd .globl _Z18kernel_dot_productPKdS0_iPd .type _Z18kernel_dot_productPKdS0_iPd, @function 
_Z18kernel_dot_productPKdS0_iPd: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z45__device_stub__Z18kernel_dot_productPKdS0_iPdPKdS0_iPd addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z18kernel_dot_productPKdS0_iPd, .-_Z18kernel_dot_productPKdS0_iPd .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "_Z18kernel_dot_productPKdS0_iPd" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z18kernel_dot_productPKdS0_iPd(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include "includes.h" __global__ void kernel_dot_product(const double * vec1, const double * vec2, int numElements, double * answer) { extern __shared__ double products[]; // one element per thread int i = threadIdx.x; // numElements assumed to fit into one block products[i] = vec1[i] * vec2[i]; __syncthreads(); if (i == 0) { double sum = 0; for (int j = 0; j < numElements; ++j) { sum += products[j]; } *answer = sum; } }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void kernel_dot_product(const double * vec1, const double * vec2, int numElements, double * answer) { extern __shared__ double products[]; // one element per thread int i = threadIdx.x; // numElements assumed to fit into one block products[i] = vec1[i] * vec2[i]; __syncthreads(); if (i == 0) { double sum = 0; for (int j = 0; j < numElements; ++j) { sum += products[j]; } *answer = sum; } }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include "includes.h" __global__ void kernel_dot_product(const double * vec1, const double * vec2, int numElements, double * answer) { extern __shared__ double products[]; // one element per thread int i = threadIdx.x; // numElements assumed to fit into one block products[i] = vec1[i] * vec2[i]; __syncthreads(); if (i == 0) { double sum = 0; for (int j = 0; j < numElements; ++j) { sum += products[j]; } *answer = sum; } }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z18kernel_dot_productPKdS0_iPd .globl _Z18kernel_dot_productPKdS0_iPd .p2align 8 .type _Z18kernel_dot_productPKdS0_iPd,@function _Z18kernel_dot_productPKdS0_iPd: s_load_b128 s[4:7], s[0:1], 0x0 v_lshlrev_b32_e32 v5, 3, v0 s_mov_b32 s2, exec_lo s_waitcnt lgkmcnt(0) s_clause 0x1 global_load_b64 v[1:2], v5, s[4:5] global_load_b64 v[3:4], v5, s[6:7] s_waitcnt vmcnt(0) v_mul_f64 v[1:2], v[1:2], v[3:4] v_add_nc_u32_e32 v3, 0, v5 ds_store_b64 v3, v[1:2] s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv v_cmpx_eq_u32_e32 0, v0 s_cbranch_execz .LBB0_5 s_load_b32 s2, s[0:1], 0x10 v_mov_b32_e32 v0, 0 v_mov_b32_e32 v1, 0 s_waitcnt lgkmcnt(0) s_cmp_lt_i32 s2, 1 s_cbranch_scc1 .LBB0_4 s_mov_b32 s3, 0 .LBB0_3: s_delay_alu instid0(SALU_CYCLE_1) v_mov_b32_e32 v2, s3 s_add_i32 s2, s2, -1 s_add_i32 s3, s3, 8 s_cmp_eq_u32 s2, 0 ds_load_b64 v[2:3], v2 s_waitcnt lgkmcnt(0) v_add_f64 v[0:1], v[0:1], v[2:3] s_cbranch_scc0 .LBB0_3 .LBB0_4: s_load_b64 s[0:1], s[0:1], 0x18 v_mov_b32_e32 v2, 0 s_waitcnt lgkmcnt(0) global_store_b64 v2, v[0:1], s[0:1] .LBB0_5: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z18kernel_dot_productPKdS0_iPd .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 32 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 8 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 
.amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z18kernel_dot_productPKdS0_iPd, .Lfunc_end0-_Z18kernel_dot_productPKdS0_iPd .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value - .address_space: global .offset: 24 .size: 8 .value_kind: global_buffer .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 32 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z18kernel_dot_productPKdS0_iPd .private_segment_fixed_size: 0 .sgpr_count: 8 .sgpr_spill_count: 0 .symbol: _Z18kernel_dot_productPKdS0_iPd.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include "includes.h" __global__ void kernel_dot_product(const double * vec1, const double * vec2, int numElements, double * answer) { extern __shared__ double products[]; // one element per thread int i = threadIdx.x; // numElements assumed to fit into one block products[i] = vec1[i] * vec2[i]; __syncthreads(); if (i == 0) { double sum = 0; for (int j = 0; j < numElements; ++j) { sum += products[j]; } *answer = sum; } }
.text .file "kernel_dot_product.hip" .globl _Z33__device_stub__kernel_dot_productPKdS0_iPd # -- Begin function _Z33__device_stub__kernel_dot_productPKdS0_iPd .p2align 4, 0x90 .type _Z33__device_stub__kernel_dot_productPKdS0_iPd,@function _Z33__device_stub__kernel_dot_productPKdS0_iPd: # @_Z33__device_stub__kernel_dot_productPKdS0_iPd .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 4(%rsp) movq %rcx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 4(%rsp), %rax movq %rax, 96(%rsp) leaq 56(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z18kernel_dot_productPKdS0_iPd, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z33__device_stub__kernel_dot_productPKdS0_iPd, .Lfunc_end0-_Z33__device_stub__kernel_dot_productPKdS0_iPd .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z18kernel_dot_productPKdS0_iPd, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function 
.p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z18kernel_dot_productPKdS0_iPd,@object # @_Z18kernel_dot_productPKdS0_iPd .section .rodata,"a",@progbits .globl _Z18kernel_dot_productPKdS0_iPd .p2align 3, 0x0 _Z18kernel_dot_productPKdS0_iPd: .quad _Z33__device_stub__kernel_dot_productPKdS0_iPd .size _Z18kernel_dot_productPKdS0_iPd, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z18kernel_dot_productPKdS0_iPd" .size .L__unnamed_1, 32 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z33__device_stub__kernel_dot_productPKdS0_iPd .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z18kernel_dot_productPKdS0_iPd .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z18kernel_dot_productPKdS0_iPd .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R9, SR_TID.X ; /* 0x0000000000097919 */ /* 0x000e220000002100 */ /*0020*/ IMAD.MOV.U32 R4, RZ, RZ, 0x8 ; /* 0x00000008ff047424 */ /* 0x000fe200078e00ff */ /*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */ /* 0x000fc60000000a00 */ /*0040*/ IMAD.WIDE R2, R9, R4, c[0x0][0x160] ; /* 0x0000580009027625 */ /* 0x001fc800078e0204 */ /*0050*/ IMAD.WIDE R4, R9.reuse, R4, c[0x0][0x168] ; /* 0x00005a0009047625 */ /* 0x040fe400078e0204 */ /*0060*/ LDG.E.64 R2, [R2.64] ; /* 0x0000000602027981 */ /* 0x000ea8000c1e1b00 */ /*0070*/ LDG.E.64 R4, [R4.64] ; /* 0x0000000604047981 */ /* 0x000ea2000c1e1b00 */ /*0080*/ ISETP.NE.AND P0, PT, R9, RZ, PT ; /* 0x000000ff0900720c */ /* 0x000fe20003f05270 */ /*0090*/ DMUL R6, R4, R2 ; /* 0x0000000204067228 */ /* 0x004e0e0000000000 */ /*00a0*/ STS.64 [R9.X8], R6 ; /* 0x0000000609007388 */ /* 0x0011e80000008a00 */ /*00b0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*00c0*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*00d0*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff027624 */ /* 0x001fe200078e00ff */ /*00e0*/ CS2R R20, SRZ ; /* 0x0000000000147805 */ /* 0x000fc8000001ff00 */ /*00f0*/ ISETP.GE.AND P0, PT, R2, 0x1, PT ; /* 0x000000010200780c */ /* 0x000fda0003f06270 */ /*0100*/ @!P0 BRA 0x660 ; /* 0x0000055000008947 */ /* 0x000fea0003800000 */ /*0110*/ IADD3 R0, R2.reuse, -0x1, RZ ; /* 0xffffffff02007810 */ /* 0x040fe20007ffe0ff */ /*0120*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */ /* 0x000fe20008000000 */ /*0130*/ LOP3.LUT R2, R2, 0x3, RZ, 0xc0, !PT ; /* 0x0000000302027812 */ /* 0x000fe200078ec0ff */ /*0140*/ CS2R R20, SRZ ; /* 0x0000000000147805 */ /* 0x000fe2000001ff00 */ /*0150*/ 
ISETP.GE.U32.AND P0, PT, R0, 0x3, PT ; /* 0x000000030000780c */ /* 0x000fda0003f06070 */ /*0160*/ @!P0 BRA 0x5d0 ; /* 0x0000046000008947 */ /* 0x000fea0003800000 */ /*0170*/ IADD3 R0, -R2, c[0x0][0x170], RZ ; /* 0x00005c0002007a10 */ /* 0x000fe20007ffe1ff */ /*0180*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */ /* 0x000fe20008000000 */ /*0190*/ CS2R R20, SRZ ; /* 0x0000000000147805 */ /* 0x000fe2000001ff00 */ /*01a0*/ UMOV UR5, URZ ; /* 0x0000003f00057c82 */ /* 0x000fe20008000000 */ /*01b0*/ ISETP.GT.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */ /* 0x000fda0003f04270 */ /*01c0*/ @!P0 BRA 0x520 ; /* 0x0000035000008947 */ /* 0x000fea0003800000 */ /*01d0*/ ISETP.GT.AND P1, PT, R0, 0xc, PT ; /* 0x0000000c0000780c */ /* 0x000fe40003f24270 */ /*01e0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */ /* 0x000fd60003f0f070 */ /*01f0*/ @!P1 BRA 0x3e0 ; /* 0x000001e000009947 */ /* 0x000fea0003800000 */ /*0200*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*0210*/ LDS.128 R4, [UR5] ; /* 0x00000005ff047984 */ /* 0x001e220008000c00 */ /*0220*/ IADD3 R0, R0, -0x10, RZ ; /* 0xfffffff000007810 */ /* 0x000fe20007ffe0ff */ /*0230*/ UIADD3 UR4, UR4, 0x10, URZ ; /* 0x0000001004047890 */ /* 0x000fe4000fffe03f */ /*0240*/ LDS.128 R8, [UR5+0x10] ; /* 0x00001005ff087984 */ /* 0x002e620008000c00 */ /*0250*/ ISETP.GT.AND P1, PT, R0, 0xc, PT ; /* 0x0000000c0000780c */ /* 0x000fc60003f24270 */ /*0260*/ LDS.128 R12, [UR5+0x20] ; /* 0x00002005ff0c7984 */ /* 0x000ea80008000c00 */ /*0270*/ LDS.128 R16, [UR5+0x30] ; /* 0x00003005ff107984 */ /* 0x000ee20008000c00 */ /*0280*/ DADD R4, R4, R20 ; /* 0x0000000004047229 */ /* 0x001e0c0000000014 */ /*0290*/ DADD R20, R6, R4 ; /* 0x0000000006147229 */ /* 0x0010480000000004 */ /*02a0*/ LDS.128 R4, [UR5+0x40] ; /* 0x00004005ff047984 */ /* 0x001e240008000c00 */ /*02b0*/ DADD R8, R20, R8 ; /* 0x0000000014087229 */ /* 0x002e4c0000000008 */ /*02c0*/ DADD R20, R10, R8 ; /* 0x000000000a147229 */ /* 
0x0022880000000008 */ /*02d0*/ LDS.128 R8, [UR5+0x50] ; /* 0x00005005ff087984 */ /* 0x002e640008000c00 */ /*02e0*/ DADD R12, R20, R12 ; /* 0x00000000140c7229 */ /* 0x004e8c000000000c */ /*02f0*/ DADD R20, R14, R12 ; /* 0x000000000e147229 */ /* 0x0044c8000000000c */ /*0300*/ LDS.128 R12, [UR5+0x60] ; /* 0x00006005ff0c7984 */ /* 0x004ea40008000c00 */ /*0310*/ DADD R16, R20, R16 ; /* 0x0000000014107229 */ /* 0x008ecc0000000010 */ /*0320*/ DADD R20, R18, R16 ; /* 0x0000000012147229 */ /* 0x0086080000000010 */ /*0330*/ LDS.128 R16, [UR5+0x70] ; /* 0x00007005ff107984 */ /* 0x008ee20008000c00 */ /*0340*/ UIADD3 UR5, UR5, 0x80, URZ ; /* 0x0000008005057890 */ /* 0x000fe2000fffe03f */ /*0350*/ DADD R4, R20, R4 ; /* 0x0000000014047229 */ /* 0x001e0c0000000004 */ /*0360*/ DADD R4, R6, R4 ; /* 0x0000000006047229 */ /* 0x001e4c0000000004 */ /*0370*/ DADD R4, R4, R8 ; /* 0x0000000004047229 */ /* 0x002e0c0000000008 */ /*0380*/ DADD R4, R10, R4 ; /* 0x000000000a047229 */ /* 0x001e8c0000000004 */ /*0390*/ DADD R4, R4, R12 ; /* 0x0000000004047229 */ /* 0x004e0c000000000c */ /*03a0*/ DADD R4, R14, R4 ; /* 0x000000000e047229 */ /* 0x001ecc0000000004 */ /*03b0*/ DADD R4, R4, R16 ; /* 0x0000000004047229 */ /* 0x008e0c0000000010 */ /*03c0*/ DADD R20, R18, R4 ; /* 0x0000000012147229 */ /* 0x0010620000000004 */ /*03d0*/ @P1 BRA 0x210 ; /* 0xfffffe3000001947 */ /* 0x000fea000383ffff */ /*03e0*/ ISETP.GT.AND P1, PT, R0, 0x4, PT ; /* 0x000000040000780c */ /* 0x000fda0003f24270 */ /*03f0*/ @!P1 BRA 0x500 ; /* 0x0000010000009947 */ /* 0x000fea0003800000 */ /*0400*/ LDS.128 R4, [UR5] ; /* 0x00000005ff047984 */ /* 0x001e220008000c00 */ /*0410*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe20003f0e170 */ /*0420*/ UIADD3 UR4, UR4, 0x8, URZ ; /* 0x0000000804047890 */ /* 0x000fe2000fffe03f */ /*0430*/ IADD3 R0, R0, -0x8, RZ ; /* 0xfffffff800007810 */ /* 0x000fe20007ffe0ff */ /*0440*/ LDS.128 R8, [UR5+0x10] ; /* 0x00001005ff087984 */ /* 0x000ea80008000c00 */ /*0450*/ 
LDS.128 R12, [UR5+0x20] ; /* 0x00002005ff0c7984 */ /* 0x000ee80008000c00 */ /*0460*/ LDS.128 R16, [UR5+0x30] ; /* 0x00003005ff107984 */ /* 0x000f220008000c00 */ /*0470*/ UIADD3 UR5, UR5, 0x40, URZ ; /* 0x0000004005057890 */ /* 0x000fe2000fffe03f */ /*0480*/ DADD R4, R20, R4 ; /* 0x0000000014047229 */ /* 0x003e0c0000000004 */ /*0490*/ DADD R4, R6, R4 ; /* 0x0000000006047229 */ /* 0x001e8c0000000004 */ /*04a0*/ DADD R4, R4, R8 ; /* 0x0000000004047229 */ /* 0x004e0c0000000008 */ /*04b0*/ DADD R4, R10, R4 ; /* 0x000000000a047229 */ /* 0x001ecc0000000004 */ /*04c0*/ DADD R4, R4, R12 ; /* 0x0000000004047229 */ /* 0x008e0c000000000c */ /*04d0*/ DADD R4, R14, R4 ; /* 0x000000000e047229 */ /* 0x001f0c0000000004 */ /*04e0*/ DADD R4, R4, R16 ; /* 0x0000000004047229 */ /* 0x010e0c0000000010 */ /*04f0*/ DADD R20, R18, R4 ; /* 0x0000000012147229 */ /* 0x0010480000000004 */ /*0500*/ ISETP.NE.OR P0, PT, R0, RZ, P0 ; /* 0x000000ff0000720c */ /* 0x000fda0000705670 */ /*0510*/ @!P0 BRA 0x5d0 ; /* 0x000000b000008947 */ /* 0x000fea0003800000 */ /*0520*/ LDS.128 R8, [UR5] ; /* 0x00000005ff087984 */ /* 0x000ea20008000c00 */ /*0530*/ IADD3 R0, R0, -0x4, RZ ; /* 0xfffffffc00007810 */ /* 0x000fe20007ffe0ff */ /*0540*/ UIADD3 UR4, UR4, 0x4, URZ ; /* 0x0000000404047890 */ /* 0x000fe4000fffe03f */ /*0550*/ LDS.128 R4, [UR5+0x10] ; /* 0x00001005ff047984 */ /* 0x001e220008000c00 */ /*0560*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */ /* 0x000fe20003f05270 */ /*0570*/ UIADD3 UR5, UR5, 0x20, URZ ; /* 0x0000002005057890 */ /* 0x000fe2000fffe03f */ /*0580*/ DADD R8, R8, R20 ; /* 0x0000000008087229 */ /* 0x006e4c0000000014 */ /*0590*/ DADD R8, R10, R8 ; /* 0x000000000a087229 */ /* 0x002e0c0000000008 */ /*05a0*/ DADD R4, R8, R4 ; /* 0x0000000008047229 */ /* 0x001e0c0000000004 */ /*05b0*/ DADD R20, R6, R4 ; /* 0x0000000006147229 */ /* 0x0010640000000004 */ /*05c0*/ @P0 BRA 0x520 ; /* 0xffffff5000000947 */ /* 0x003fea000383ffff */ /*05d0*/ ISETP.NE.AND P0, PT, R2, RZ, PT ; /* 
0x000000ff0200720c */ /* 0x000fda0003f05270 */ /*05e0*/ @!P0 BRA 0x660 ; /* 0x0000007000008947 */ /* 0x000fea0003800000 */ /*05f0*/ USHF.L.U32 UR4, UR4, 0x3, URZ ; /* 0x0000000304047899 */ /* 0x000fd2000800063f */ /*0600*/ LDS.64 R4, [UR4] ; /* 0x00000004ff047984 */ /* 0x001e220008000a00 */ /*0610*/ IADD3 R2, R2, -0x1, RZ ; /* 0xffffffff02027810 */ /* 0x000fe20007ffe0ff */ /*0620*/ UIADD3 UR4, UR4, 0x8, URZ ; /* 0x0000000804047890 */ /* 0x000fc6000fffe03f */ /*0630*/ ISETP.NE.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */ /* 0x000fe20003f05270 */ /*0640*/ DADD R20, R4, R20 ; /* 0x0000000004147229 */ /* 0x0030580000000014 */ /*0650*/ @P0 BRA 0x600 ; /* 0xffffffa000000947 */ /* 0x000fea000383ffff */ /*0660*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff027624 */ /* 0x000fe400078e00ff */ /*0670*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x17c] ; /* 0x00005f00ff037624 */ /* 0x000fca00078e00ff */ /*0680*/ STG.E.64 [R2.64], R20 ; /* 0x0000001402007986 */ /* 0x002fe2000c101b06 */ /*0690*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*06a0*/ BRA 0x6a0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*06b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*06c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*06d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*06e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*06f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0700*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0710*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0720*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0730*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0740*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0750*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0760*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0770*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z18kernel_dot_productPKdS0_iPd .globl _Z18kernel_dot_productPKdS0_iPd .p2align 8 .type _Z18kernel_dot_productPKdS0_iPd,@function _Z18kernel_dot_productPKdS0_iPd: s_load_b128 s[4:7], s[0:1], 0x0 v_lshlrev_b32_e32 v5, 3, v0 s_mov_b32 s2, exec_lo s_waitcnt lgkmcnt(0) s_clause 0x1 global_load_b64 v[1:2], v5, s[4:5] global_load_b64 v[3:4], v5, s[6:7] s_waitcnt vmcnt(0) v_mul_f64 v[1:2], v[1:2], v[3:4] v_add_nc_u32_e32 v3, 0, v5 ds_store_b64 v3, v[1:2] s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv v_cmpx_eq_u32_e32 0, v0 s_cbranch_execz .LBB0_5 s_load_b32 s2, s[0:1], 0x10 v_mov_b32_e32 v0, 0 v_mov_b32_e32 v1, 0 s_waitcnt lgkmcnt(0) s_cmp_lt_i32 s2, 1 s_cbranch_scc1 .LBB0_4 s_mov_b32 s3, 0 .LBB0_3: s_delay_alu instid0(SALU_CYCLE_1) v_mov_b32_e32 v2, s3 s_add_i32 s2, s2, -1 s_add_i32 s3, s3, 8 s_cmp_eq_u32 s2, 0 ds_load_b64 v[2:3], v2 s_waitcnt lgkmcnt(0) v_add_f64 v[0:1], v[0:1], v[2:3] s_cbranch_scc0 .LBB0_3 .LBB0_4: s_load_b64 s[0:1], s[0:1], 0x18 v_mov_b32_e32 v2, 0 s_waitcnt lgkmcnt(0) global_store_b64 v2, v[0:1], s[0:1] .LBB0_5: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z18kernel_dot_productPKdS0_iPd .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 32 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 8 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 
.amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z18kernel_dot_productPKdS0_iPd, .Lfunc_end0-_Z18kernel_dot_productPKdS0_iPd .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value - .address_space: global .offset: 24 .size: 8 .value_kind: global_buffer .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 32 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z18kernel_dot_productPKdS0_iPd .private_segment_fixed_size: 0 .sgpr_count: 8 .sgpr_spill_count: 0 .symbol: _Z18kernel_dot_productPKdS0_iPd.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00015f2f_00000000-6_kernel_dot_product.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z45__device_stub__Z18kernel_dot_productPKdS0_iPdPKdS0_iPd .type _Z45__device_stub__Z18kernel_dot_productPKdS0_iPdPKdS0_iPd, @function _Z45__device_stub__Z18kernel_dot_productPKdS0_iPdPKdS0_iPd: .LFB2051: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movq %rcx, (%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) movq %rsp, %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z18kernel_dot_productPKdS0_iPd(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z45__device_stub__Z18kernel_dot_productPKdS0_iPdPKdS0_iPd, .-_Z45__device_stub__Z18kernel_dot_productPKdS0_iPdPKdS0_iPd .globl _Z18kernel_dot_productPKdS0_iPd .type _Z18kernel_dot_productPKdS0_iPd, @function 
_Z18kernel_dot_productPKdS0_iPd: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z45__device_stub__Z18kernel_dot_productPKdS0_iPdPKdS0_iPd addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z18kernel_dot_productPKdS0_iPd, .-_Z18kernel_dot_productPKdS0_iPd .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "_Z18kernel_dot_productPKdS0_iPd" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z18kernel_dot_productPKdS0_iPd(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "kernel_dot_product.hip" .globl _Z33__device_stub__kernel_dot_productPKdS0_iPd # -- Begin function _Z33__device_stub__kernel_dot_productPKdS0_iPd .p2align 4, 0x90 .type _Z33__device_stub__kernel_dot_productPKdS0_iPd,@function _Z33__device_stub__kernel_dot_productPKdS0_iPd: # @_Z33__device_stub__kernel_dot_productPKdS0_iPd .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 4(%rsp) movq %rcx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 4(%rsp), %rax movq %rax, 96(%rsp) leaq 56(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z18kernel_dot_productPKdS0_iPd, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z33__device_stub__kernel_dot_productPKdS0_iPd, .Lfunc_end0-_Z33__device_stub__kernel_dot_productPKdS0_iPd .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z18kernel_dot_productPKdS0_iPd, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function 
.p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z18kernel_dot_productPKdS0_iPd,@object # @_Z18kernel_dot_productPKdS0_iPd .section .rodata,"a",@progbits .globl _Z18kernel_dot_productPKdS0_iPd .p2align 3, 0x0 _Z18kernel_dot_productPKdS0_iPd: .quad _Z33__device_stub__kernel_dot_productPKdS0_iPd .size _Z18kernel_dot_productPKdS0_iPd, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z18kernel_dot_productPKdS0_iPd" .size .L__unnamed_1, 32 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z33__device_stub__kernel_dot_productPKdS0_iPd .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z18kernel_dot_productPKdS0_iPd .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
/*
 * Rectangular matrix multiplication
 *   A[M][K] * B[K][N] = C[M][N]
 *
 * Sequential reference (matmul_base), OpenMP variant (matmul_openmp),
 * and stubs for three CUDA versions (global memory, shared memory, cuBLAS).
 */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/timeb.h>
#include <string.h>

/* read timer in second */
double read_timer() {
    struct timeb tm;
    ftime(&tm);
    return (double) tm.time + (double) tm.millitm / 1000.0;
}

/* read timer in ms */
double read_timer_ms() {
    struct timeb tm;
    ftime(&tm);
    return (double) tm.time * 1000.0 + (double) tm.millitm;
}

#define REAL float

/*
 * Fill the M x N row-major matrix A with pseudo-random values from drand48().
 * Call srand48() beforehand for a reproducible sequence.
 */
void init(int M, int N, REAL * A) {
    int i, j;
    for (i = 0; i < M; i++) {
        for (j = 0; j < N; j++) {
            A[i * N + j] = (REAL) drand48();
        }
    }
}

/*
 * Maximum element-wise relative error |(A[i][j]-B[i][j]) / A[i][j]| over two
 * M x N row-major matrices. Assumes A has no exactly-zero entries (true here:
 * drand48() yields values in [0,1) and a zero draw is practically impossible).
 */
double maxerror(int M, int N, REAL * A, REAL *B) {
    int i, j;
    double error = 0.0;
    for (i = 0; i < M; i++) {
        for (j = 0; j < N; j++) {
            double diff = (A[i * N + j] - B[i * N + j]) / A[i * N + j];
            if (diff < 0) diff = -diff;
            if (diff > error) error = diff;
        }
    }
    return error;
}

void matmul_base(int N, REAL *A, REAL * B, REAL *C);
void matmul_openmp(int N, REAL *A, REAL *B, REAL *C, int num_tasks);
void matmul_cuda_v1_vanilla(int N, REAL *A, REAL *B, REAL *C);
void matmul_cuda_v1_shmem(int N, REAL *A, REAL *B, REAL *C);
void matmul_cuda_v1_cublas(int N, REAL *A, REAL *B, REAL *C);

int main(int argc, char *argv[]) {
    int N;
    int num_tasks = 5; /* 5 is default number of tasks */
    double elapsed_base, elapsed_openmp; /* for timing */
    if (argc < 2) {
        fprintf(stderr, "Usage: matmul <n> [<#tasks(%d)>]\n", num_tasks);
        exit(1);
    }
    N = atoi(argv[1]);
    if (argc > 2) num_tasks = atoi(argv[2]);

    /* One heap buffer holding the 4 matrices used in this example
     * (A, B, C_base, C_openmp). size_t arithmetic avoids the int
     * overflow of N*N that the index expressions would hit for large N. */
    size_t n2 = (size_t) N * (size_t) N;
    REAL * heap_buffer = (REAL*) malloc(sizeof(REAL) * n2 * 4);
    if (heap_buffer == NULL) {
        fprintf(stderr, "cannot allocate matrices for N=%d\n", N);
        exit(1);
    }
    /* below is a cast from memory buffer to a 2-d row-major array */
    REAL *A = heap_buffer;
    REAL *B = &heap_buffer[n2];
    REAL *C_base = &heap_buffer[2 * n2];
    REAL *C_openmp = &heap_buffer[3 * n2];

    srand48((1 << 12));
    init(N, N, A);
    init(N, N, B);

    /* example run */
    elapsed_base = read_timer();
    matmul_base(N, A, B, C_base);
    elapsed_base = (read_timer() - elapsed_base);

    elapsed_openmp = read_timer();
    matmul_openmp(N, A, B, C_openmp, num_tasks);
    elapsed_openmp = (read_timer() - elapsed_openmp);

    /* call and timing for the three CUDA versions */
    /* there are three devices you can use on gpu.secs.oakland.edu, 0, 2, 3.
     * 1 is a graphical card with less computation capability. */
    cudaSetDevice(0);
    //call and time for matmul_cuda_v1_vanilla(int N, REAL *A, REAL *B, REAL *C);
    //call and time for matmul_cuda_v1_shmem(int N, REAL *A, REAL *B, REAL *C);
    //call and time for matmul_cuda_v1_cublas(int N, REAL *A, REAL *B, REAL *C);

    printf("======================================================================================================\n");
    printf("Matrix Multiplication: A[M][K] * B[k][N] = C[M][N], M=K=N=%d, %d threads/tasks\n", N, num_tasks);
    printf("------------------------------------------------------------------------------------------------------\n");
    printf("Performance:\t\tRuntime (ms)\t MFLOPS \t\tError (compared to base)\n");
    printf("------------------------------------------------------------------------------------------------------\n");
    /* maxerror(C_base, C_base) is identically 0 — printed as the baseline row. */
    printf("matmul_base:\t\t%4f\t%4f \t\t%g\n", elapsed_base * 1.0e3,
           ((((2.0 * N) * N) * N) / (1.0e6 * elapsed_base)),
           maxerror(N, N, C_base, C_base));
    printf("matmul_openmp:\t\t%4f\t%4f \t\t%g\n", elapsed_openmp * 1.0e3,
           ((((2.0 * N) * N) * N) / (1.0e6 * elapsed_openmp)),
           maxerror(N, N, C_base, C_openmp));
    /* put other printf statements for outputing results for GPU execution */

    free(heap_buffer);
    return 0;
}

/* Sequential reference: C = A * B for N x N row-major matrices. */
void matmul_base(int N, REAL *A, REAL * B, REAL *C) {
    int i, j, k;
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            REAL temp = 0.0;
            for (k = 0; k < N; k++) {
                temp += A[i * N + k] * B[k * N + j];
            }
            C[i * N + j] = temp;
        }
    }
}

/* OpenMP variant: rows of C are divided among num_tasks threads. */
void matmul_openmp(int N, REAL *A, REAL *B, REAL *C, int num_tasks) {
    int i, j, k;
#pragma omp parallel for shared(N,A,B,C,num_tasks) private(i,j,k) num_threads(num_tasks)
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            REAL temp = 0.0;
            for (k = 0; k < N; k++) {
                temp += A[i * N + k] * B[k * N + j];
            }
            C[i * N + j] = temp;
        }
    }
}

/*
 * call to kernel that uses GPU global memory
 * TODO: allocate device buffers, copy A/B, launch kernel, copy C back.
 */
void matmul_cuda_v1_vanilla(int N, REAL *A, REAL *B, REAL *C) {
}

/*
 * call to kernel that use GPU shared memory
 */
void matmul_cuda_v1_shmem(int N, REAL *A, REAL *B, REAL *C) {
}

/*
 * call to sgemm of cublas library
 */
void matmul_cuda_v1_cublas(int N, REAL *A, REAL *B, REAL *C) {
}
code for sm_80
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
/*
 * Rectangular matrix multiplication
 *   A[M][K] * B[K][N] = C[M][N]
 *
 * Sequential reference (matmul_base), OpenMP variant (matmul_openmp),
 * and stubs for three CUDA versions (global memory, shared memory, cuBLAS).
 */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/timeb.h>
#include <string.h>

/* read timer in second */
double read_timer() {
    struct timeb tm;
    ftime(&tm);
    return (double) tm.time + (double) tm.millitm / 1000.0;
}

/* read timer in ms */
double read_timer_ms() {
    struct timeb tm;
    ftime(&tm);
    return (double) tm.time * 1000.0 + (double) tm.millitm;
}

#define REAL float

/*
 * Fill the M x N row-major matrix A with pseudo-random values from drand48().
 * Call srand48() beforehand for a reproducible sequence.
 */
void init(int M, int N, REAL * A) {
    int i, j;
    for (i = 0; i < M; i++) {
        for (j = 0; j < N; j++) {
            A[i * N + j] = (REAL) drand48();
        }
    }
}

/*
 * Maximum element-wise relative error |(A[i][j]-B[i][j]) / A[i][j]| over two
 * M x N row-major matrices. Assumes A has no exactly-zero entries (true here:
 * drand48() yields values in [0,1) and a zero draw is practically impossible).
 */
double maxerror(int M, int N, REAL * A, REAL *B) {
    int i, j;
    double error = 0.0;
    for (i = 0; i < M; i++) {
        for (j = 0; j < N; j++) {
            double diff = (A[i * N + j] - B[i * N + j]) / A[i * N + j];
            if (diff < 0) diff = -diff;
            if (diff > error) error = diff;
        }
    }
    return error;
}

void matmul_base(int N, REAL *A, REAL * B, REAL *C);
void matmul_openmp(int N, REAL *A, REAL *B, REAL *C, int num_tasks);
void matmul_cuda_v1_vanilla(int N, REAL *A, REAL *B, REAL *C);
void matmul_cuda_v1_shmem(int N, REAL *A, REAL *B, REAL *C);
void matmul_cuda_v1_cublas(int N, REAL *A, REAL *B, REAL *C);

int main(int argc, char *argv[]) {
    int N;
    int num_tasks = 5; /* 5 is default number of tasks */
    double elapsed_base, elapsed_openmp; /* for timing */
    if (argc < 2) {
        fprintf(stderr, "Usage: matmul <n> [<#tasks(%d)>]\n", num_tasks);
        exit(1);
    }
    N = atoi(argv[1]);
    if (argc > 2) num_tasks = atoi(argv[2]);

    /* One heap buffer holding the 4 matrices used in this example
     * (A, B, C_base, C_openmp). size_t arithmetic avoids the int
     * overflow of N*N that the index expressions would hit for large N. */
    size_t n2 = (size_t) N * (size_t) N;
    REAL * heap_buffer = (REAL*) malloc(sizeof(REAL) * n2 * 4);
    if (heap_buffer == NULL) {
        fprintf(stderr, "cannot allocate matrices for N=%d\n", N);
        exit(1);
    }
    /* below is a cast from memory buffer to a 2-d row-major array */
    REAL *A = heap_buffer;
    REAL *B = &heap_buffer[n2];
    REAL *C_base = &heap_buffer[2 * n2];
    REAL *C_openmp = &heap_buffer[3 * n2];

    srand48((1 << 12));
    init(N, N, A);
    init(N, N, B);

    /* example run */
    elapsed_base = read_timer();
    matmul_base(N, A, B, C_base);
    elapsed_base = (read_timer() - elapsed_base);

    elapsed_openmp = read_timer();
    matmul_openmp(N, A, B, C_openmp, num_tasks);
    elapsed_openmp = (read_timer() - elapsed_openmp);

    /* call and timing for the three CUDA versions */
    /* there are three devices you can use on gpu.secs.oakland.edu, 0, 2, 3.
     * 1 is a graphical card with less computation capability. */
    cudaSetDevice(0);
    //call and time for matmul_cuda_v1_vanilla(int N, REAL *A, REAL *B, REAL *C);
    //call and time for matmul_cuda_v1_shmem(int N, REAL *A, REAL *B, REAL *C);
    //call and time for matmul_cuda_v1_cublas(int N, REAL *A, REAL *B, REAL *C);

    printf("======================================================================================================\n");
    printf("Matrix Multiplication: A[M][K] * B[k][N] = C[M][N], M=K=N=%d, %d threads/tasks\n", N, num_tasks);
    printf("------------------------------------------------------------------------------------------------------\n");
    printf("Performance:\t\tRuntime (ms)\t MFLOPS \t\tError (compared to base)\n");
    printf("------------------------------------------------------------------------------------------------------\n");
    /* maxerror(C_base, C_base) is identically 0 — printed as the baseline row. */
    printf("matmul_base:\t\t%4f\t%4f \t\t%g\n", elapsed_base * 1.0e3,
           ((((2.0 * N) * N) * N) / (1.0e6 * elapsed_base)),
           maxerror(N, N, C_base, C_base));
    printf("matmul_openmp:\t\t%4f\t%4f \t\t%g\n", elapsed_openmp * 1.0e3,
           ((((2.0 * N) * N) * N) / (1.0e6 * elapsed_openmp)),
           maxerror(N, N, C_base, C_openmp));
    /* put other printf statements for outputing results for GPU execution */

    free(heap_buffer);
    return 0;
}

/* Sequential reference: C = A * B for N x N row-major matrices. */
void matmul_base(int N, REAL *A, REAL * B, REAL *C) {
    int i, j, k;
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            REAL temp = 0.0;
            for (k = 0; k < N; k++) {
                temp += A[i * N + k] * B[k * N + j];
            }
            C[i * N + j] = temp;
        }
    }
}

/* OpenMP variant: rows of C are divided among num_tasks threads. */
void matmul_openmp(int N, REAL *A, REAL *B, REAL *C, int num_tasks) {
    int i, j, k;
#pragma omp parallel for shared(N,A,B,C,num_tasks) private(i,j,k) num_threads(num_tasks)
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            REAL temp = 0.0;
            for (k = 0; k < N; k++) {
                temp += A[i * N + k] * B[k * N + j];
            }
            C[i * N + j] = temp;
        }
    }
}

/*
 * call to kernel that uses GPU global memory
 * TODO: allocate device buffers, copy A/B, launch kernel, copy C back.
 */
void matmul_cuda_v1_vanilla(int N, REAL *A, REAL *B, REAL *C) {
}

/*
 * call to kernel that use GPU shared memory
 */
void matmul_cuda_v1_shmem(int N, REAL *A, REAL *B, REAL *C) {
}

/*
 * call to sgemm of cublas library
 */
void matmul_cuda_v1_cublas(int N, REAL *A, REAL *B, REAL *C) {
}
.file "tmpxft_001b6161_00000000-6_matmul.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2069: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2069: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z10read_timerv .type _Z10read_timerv, @function _Z10read_timerv: .LFB2057: .cfi_startproc endbr64 subq $40, %rsp .cfi_def_cfa_offset 48 movq %fs:40, %rax movq %rax, 24(%rsp) xorl %eax, %eax movq %rsp, %rdi call ftime@PLT movzwl 8(%rsp), %eax pxor %xmm0, %xmm0 cvtsi2sdl %eax, %xmm0 divsd .LC0(%rip), %xmm0 pxor %xmm1, %xmm1 cvtsi2sdq (%rsp), %xmm1 addsd %xmm1, %xmm0 movq 24(%rsp), %rax subq %fs:40, %rax jne .L6 addq $40, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L6: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size _Z10read_timerv, .-_Z10read_timerv .globl _Z13read_timer_msv .type _Z13read_timer_msv, @function _Z13read_timer_msv: .LFB2058: .cfi_startproc endbr64 subq $40, %rsp .cfi_def_cfa_offset 48 movq %fs:40, %rax movq %rax, 24(%rsp) xorl %eax, %eax movq %rsp, %rdi call ftime@PLT pxor %xmm0, %xmm0 cvtsi2sdq (%rsp), %xmm0 mulsd .LC0(%rip), %xmm0 movzwl 8(%rsp), %eax pxor %xmm1, %xmm1 cvtsi2sdl %eax, %xmm1 addsd %xmm1, %xmm0 movq 24(%rsp), %rax subq %fs:40, %rax jne .L10 addq $40, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L10: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2058: .size _Z13read_timer_msv, .-_Z13read_timer_msv .globl _Z4initiiPf .type _Z4initiiPf, @function _Z4initiiPf: .LFB2059: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 
.cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $24, %rsp .cfi_def_cfa_offset 80 movl %edi, 4(%rsp) testl %edi, %edi jle .L11 movl %esi, %r14d movq %rdx, %r15 movl $0, %r13d movl $0, %r12d movslq %esi, %rax movq %rax, 8(%rsp) jmp .L13 .L15: movslq %r13d, %rax leaq (%r15,%rax,4), %rbx movq 8(%rsp), %rcx addq %rcx, %rax leaq (%r15,%rax,4), %rbp .L14: call drand48@PLT cvtsd2ss %xmm0, %xmm0 movss %xmm0, (%rbx) addq $4, %rbx cmpq %rbp, %rbx jne .L14 .L16: addl $1, %r12d addl %r14d, %r13d cmpl %r12d, 4(%rsp) je .L11 .L13: testl %r14d, %r14d jg .L15 jmp .L16 .L11: addq $24, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2059: .size _Z4initiiPf, .-_Z4initiiPf .globl _Z8maxerroriiPfS_ .type _Z8maxerroriiPfS_, @function _Z8maxerroriiPfS_: .LFB2060: .cfi_startproc endbr64 movl %edi, %r8d movl %esi, %r9d movq %rdx, %rsi testl %edi, %edi jle .L28 movl $0, %r10d pxor %xmm2, %xmm2 movl $0, %edi movslq %r9d, %r11 pxor %xmm3, %xmm3 movq .LC3(%rip), %xmm4 jmp .L21 .L22: maxsd %xmm2, %xmm1 movapd %xmm1, %xmm2 addq $4, %rax cmpq %rdx, %rax je .L27 .L25: movss (%rsi,%rax), %xmm1 movaps %xmm1, %xmm0 subss (%rcx,%rax), %xmm0 divss %xmm1, %xmm0 pxor %xmm1, %xmm1 cvtss2sd %xmm0, %xmm1 comiss %xmm0, %xmm3 jbe .L22 xorpd %xmm4, %xmm1 jmp .L22 .L27: addl $1, %edi addl %r9d, %r10d cmpl %edi, %r8d je .L19 .L21: testl %r9d, %r9d jle .L27 movslq %r10d, %rdx leaq 0(,%rdx,4), %rax addq %r11, %rdx salq $2, %rdx jmp .L25 .L28: pxor %xmm2, %xmm2 .L19: movapd %xmm2, %xmm0 ret .cfi_endproc .LFE2060: .size _Z8maxerroriiPfS_, .-_Z8maxerroriiPfS_ .globl _Z11matmul_baseiPfS_S_ .type _Z11matmul_baseiPfS_S_, @function _Z11matmul_baseiPfS_S_: .LFB2062: .cfi_startproc endbr64 testl %edi, %edi jle .L40 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 
.cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 movl %edi, %r10d movq %rdx, %rbp movq %rcx, %r9 movslq %edi, %rbx leaq 0(,%rbx,4), %rcx movq %rsi, %r11 addq %rcx, %rsi movl $0, %r12d .L34: movq %rbp, %r8 movl $0, %edi .L37: movq %r8, %rdx movq %r11, %rax pxor %xmm1, %xmm1 .L35: movss (%rax), %xmm0 mulss (%rdx), %xmm0 addss %xmm0, %xmm1 addq $4, %rax addq %rcx, %rdx cmpq %rsi, %rax jne .L35 movss %xmm1, (%r9,%rdi,4) addq $1, %rdi addq $4, %r8 cmpq %rbx, %rdi jne .L37 addl $1, %r12d addq %rcx, %r9 addq %rcx, %r11 addq %rcx, %rsi cmpl %r12d, %r10d jne .L34 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L40: .cfi_restore 3 .cfi_restore 6 .cfi_restore 12 ret .cfi_endproc .LFE2062: .size _Z11matmul_baseiPfS_S_, .-_Z11matmul_baseiPfS_S_ .globl _Z13matmul_openmpiPfS_S_i .type _Z13matmul_openmpiPfS_S_i, @function _Z13matmul_openmpiPfS_S_i: .LFB2063: .cfi_startproc endbr64 testl %edi, %edi jle .L51 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 movl %edi, %r10d movq %rdx, %rbp movq %rcx, %r9 movslq %edi, %rbx leaq 0(,%rbx,4), %rcx movq %rsi, %r11 addq %rcx, %rsi movl $0, %r12d .L45: movq %rbp, %r8 movl $0, %edi .L48: movq %r8, %rdx movq %r11, %rax pxor %xmm1, %xmm1 .L46: movss (%rax), %xmm0 mulss (%rdx), %xmm0 addss %xmm0, %xmm1 addq $4, %rax addq %rcx, %rdx cmpq %rsi, %rax jne .L46 movss %xmm1, (%r9,%rdi,4) addq $1, %rdi addq $4, %r8 cmpq %rbx, %rdi jne .L48 addl $1, %r12d addq %rcx, %r9 addq %rcx, %r11 addq %rcx, %rsi cmpl %r12d, %r10d jne .L45 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L51: .cfi_restore 3 .cfi_restore 6 .cfi_restore 12 ret .cfi_endproc .LFE2063: .size _Z13matmul_openmpiPfS_S_i, .-_Z13matmul_openmpiPfS_S_i .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC4: .string "Usage: matmul <n> [<#tasks(%d)>]\n" 
.align 8 .LC5: .string "======================================================================================================\n" .align 8 .LC6: .string "Matrix Multiplication: A[M][K] * B[k][N] = C[M][N], M=K=N=%d, %d threads/tasks\n" .align 8 .LC7: .string "------------------------------------------------------------------------------------------------------\n" .align 8 .LC8: .string "Performance:\t\tRuntime (ms)\t MFLOPS \t\tError (compared to base)\n" .section .rodata.str1.1,"aMS",@progbits,1 .LC10: .string "matmul_base:\t\t%4f\t%4f \t\t%g\n" .LC11: .string "matmul_openmp:\t\t%4f\t%4f \t\t%g\n" .text .globl main .type main, @function main: .LFB2061: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $40, %rsp .cfi_def_cfa_offset 96 cmpl $1, %edi jle .L59 movl %edi, %ebp movq %rsi, %r13 movq 8(%rsi), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movq %rax, %r12 movl %eax, %ebx movl $5, 12(%rsp) cmpl $2, %ebp jg .L60 .L56: movslq %r12d, %r13 imulq %r13, %r13 movq %r13, %rdi salq $4, %rdi call malloc@PLT movq %rax, %rbp movl %r12d, %eax imull %r12d, %eax leaq 0(%rbp,%r13,4), %r14 leal (%rax,%rax), %edx movslq %edx, %rdx leaq 0(%rbp,%rdx,4), %r13 cltq leaq 0(%r13,%rax,4), %r15 movl $4096, %edi call srand48@PLT movq %rbp, %rdx movl %ebx, %esi movl %ebx, %edi call _Z4initiiPf movq %r14, %rdx movl %ebx, %esi movl %ebx, %edi call _Z4initiiPf call _Z10read_timerv movsd %xmm0, 16(%rsp) movq %r13, %rcx movq %r14, %rdx movq %rbp, %rsi movl %ebx, %edi call _Z11matmul_baseiPfS_S_ call _Z10read_timerv movapd %xmm0, %xmm4 subsd 16(%rsp), %xmm4 movsd %xmm4, 16(%rsp) call _Z10read_timerv movsd %xmm0, 24(%rsp) movl 12(%rsp), %r8d movq %r15, %rcx movq %r14, %rdx movq %rbp, 
%rsi movl %ebx, %edi call _Z13matmul_openmpiPfS_S_i call _Z10read_timerv movapd %xmm0, %xmm5 subsd 24(%rsp), %xmm5 movsd %xmm5, 24(%rsp) movl $0, %edi call cudaSetDevice@PLT leaq .LC5(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl 12(%rsp), %ecx movl %ebx, %edx leaq .LC6(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq .LC7(%rip), %r14 movq %r14, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq .LC8(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq %r14, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq %r13, %rcx movq %r13, %rdx movl %ebx, %esi movl %ebx, %edi call _Z8maxerroriiPfS_ movapd %xmm0, %xmm2 pxor %xmm1, %xmm1 cvtsi2sdl %r12d, %xmm1 movapd %xmm1, %xmm3 addsd %xmm1, %xmm3 mulsd %xmm1, %xmm3 mulsd %xmm1, %xmm3 movq %xmm3, %r12 movsd .LC9(%rip), %xmm1 movsd 16(%rsp), %xmm4 mulsd %xmm4, %xmm1 movsd .LC0(%rip), %xmm0 mulsd %xmm4, %xmm0 movapd %xmm3, %xmm6 divsd %xmm1, %xmm6 movapd %xmm6, %xmm1 leaq .LC10(%rip), %rsi movl $2, %edi movl $3, %eax call __printf_chk@PLT movq %r15, %rcx movq %r13, %rdx movl %ebx, %esi movl %ebx, %edi call _Z8maxerroriiPfS_ movapd %xmm0, %xmm2 movsd .LC9(%rip), %xmm1 movsd 24(%rsp), %xmm5 mulsd %xmm5, %xmm1 movsd .LC0(%rip), %xmm0 mulsd %xmm5, %xmm0 movq %r12, %xmm7 divsd %xmm1, %xmm7 movapd %xmm7, %xmm1 leaq .LC11(%rip), %rsi movl $2, %edi movl $3, %eax call __printf_chk@PLT movq %rbp, %rdi call free@PLT movl $0, %eax addq $40, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L59: .cfi_restore_state movl $5, %ecx leaq .LC4(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L60: movq 16(%r13), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movl %eax, 12(%rsp) jmp .L56 .cfi_endproc 
.LFE2061: .size main, .-main .globl _Z22matmul_cuda_v1_vanillaiPfS_S_ .type _Z22matmul_cuda_v1_vanillaiPfS_S_, @function _Z22matmul_cuda_v1_vanillaiPfS_S_: .LFB2064: .cfi_startproc endbr64 ret .cfi_endproc .LFE2064: .size _Z22matmul_cuda_v1_vanillaiPfS_S_, .-_Z22matmul_cuda_v1_vanillaiPfS_S_ .globl _Z20matmul_cuda_v1_shmemiPfS_S_ .type _Z20matmul_cuda_v1_shmemiPfS_S_, @function _Z20matmul_cuda_v1_shmemiPfS_S_: .LFB2065: .cfi_startproc endbr64 ret .cfi_endproc .LFE2065: .size _Z20matmul_cuda_v1_shmemiPfS_S_, .-_Z20matmul_cuda_v1_shmemiPfS_S_ .globl _Z21matmul_cuda_v1_cublasiPfS_S_ .type _Z21matmul_cuda_v1_cublasiPfS_S_, @function _Z21matmul_cuda_v1_cublasiPfS_S_: .LFB2066: .cfi_startproc endbr64 ret .cfi_endproc .LFE2066: .size _Z21matmul_cuda_v1_cublasiPfS_S_, .-_Z21matmul_cuda_v1_cublasiPfS_S_ .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2092: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2092: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC0: .long 0 .long 1083129856 .section .rodata.cst16,"aM",@progbits,16 .align 16 .LC3: .long 0 .long -2147483648 .long 0 .long 0 .section .rodata.cst8 .align 8 .LC9: .long 0 .long 1093567616 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section 
.note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
/*
 * Rectangular matrix multiplication
 *   A[M][K] * B[K][N] = C[M][N]
 *
 * Sequential reference (matmul_base), OpenMP variant (matmul_openmp),
 * and stubs for three CUDA versions (global memory, shared memory, cuBLAS).
 */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/timeb.h>
#include <string.h>

/* read timer in second */
double read_timer() {
    struct timeb tm;
    ftime(&tm);
    return (double) tm.time + (double) tm.millitm / 1000.0;
}

/* read timer in ms */
double read_timer_ms() {
    struct timeb tm;
    ftime(&tm);
    return (double) tm.time * 1000.0 + (double) tm.millitm;
}

#define REAL float

/*
 * Fill the M x N row-major matrix A with pseudo-random values from drand48().
 * Call srand48() beforehand for a reproducible sequence.
 */
void init(int M, int N, REAL * A) {
    int i, j;
    for (i = 0; i < M; i++) {
        for (j = 0; j < N; j++) {
            A[i * N + j] = (REAL) drand48();
        }
    }
}

/*
 * Maximum element-wise relative error |(A[i][j]-B[i][j]) / A[i][j]| over two
 * M x N row-major matrices. Assumes A has no exactly-zero entries (true here:
 * drand48() yields values in [0,1) and a zero draw is practically impossible).
 */
double maxerror(int M, int N, REAL * A, REAL *B) {
    int i, j;
    double error = 0.0;
    for (i = 0; i < M; i++) {
        for (j = 0; j < N; j++) {
            double diff = (A[i * N + j] - B[i * N + j]) / A[i * N + j];
            if (diff < 0) diff = -diff;
            if (diff > error) error = diff;
        }
    }
    return error;
}

void matmul_base(int N, REAL *A, REAL * B, REAL *C);
void matmul_openmp(int N, REAL *A, REAL *B, REAL *C, int num_tasks);
void matmul_cuda_v1_vanilla(int N, REAL *A, REAL *B, REAL *C);
void matmul_cuda_v1_shmem(int N, REAL *A, REAL *B, REAL *C);
void matmul_cuda_v1_cublas(int N, REAL *A, REAL *B, REAL *C);

int main(int argc, char *argv[]) {
    int N;
    int num_tasks = 5; /* 5 is default number of tasks */
    double elapsed_base, elapsed_openmp; /* for timing */
    if (argc < 2) {
        fprintf(stderr, "Usage: matmul <n> [<#tasks(%d)>]\n", num_tasks);
        exit(1);
    }
    N = atoi(argv[1]);
    if (argc > 2) num_tasks = atoi(argv[2]);

    /* One heap buffer holding the 4 matrices used in this example
     * (A, B, C_base, C_openmp). size_t arithmetic avoids the int
     * overflow of N*N that the index expressions would hit for large N. */
    size_t n2 = (size_t) N * (size_t) N;
    REAL * heap_buffer = (REAL*) malloc(sizeof(REAL) * n2 * 4);
    if (heap_buffer == NULL) {
        fprintf(stderr, "cannot allocate matrices for N=%d\n", N);
        exit(1);
    }
    /* below is a cast from memory buffer to a 2-d row-major array */
    REAL *A = heap_buffer;
    REAL *B = &heap_buffer[n2];
    REAL *C_base = &heap_buffer[2 * n2];
    REAL *C_openmp = &heap_buffer[3 * n2];

    srand48((1 << 12));
    init(N, N, A);
    init(N, N, B);

    /* example run */
    elapsed_base = read_timer();
    matmul_base(N, A, B, C_base);
    elapsed_base = (read_timer() - elapsed_base);

    elapsed_openmp = read_timer();
    matmul_openmp(N, A, B, C_openmp, num_tasks);
    elapsed_openmp = (read_timer() - elapsed_openmp);

    /* call and timing for the three CUDA versions */
    /* there are three devices you can use on gpu.secs.oakland.edu, 0, 2, 3.
     * 1 is a graphical card with less computation capability. */
    cudaSetDevice(0);
    //call and time for matmul_cuda_v1_vanilla(int N, REAL *A, REAL *B, REAL *C);
    //call and time for matmul_cuda_v1_shmem(int N, REAL *A, REAL *B, REAL *C);
    //call and time for matmul_cuda_v1_cublas(int N, REAL *A, REAL *B, REAL *C);

    printf("======================================================================================================\n");
    printf("Matrix Multiplication: A[M][K] * B[k][N] = C[M][N], M=K=N=%d, %d threads/tasks\n", N, num_tasks);
    printf("------------------------------------------------------------------------------------------------------\n");
    printf("Performance:\t\tRuntime (ms)\t MFLOPS \t\tError (compared to base)\n");
    printf("------------------------------------------------------------------------------------------------------\n");
    /* maxerror(C_base, C_base) is identically 0 — printed as the baseline row. */
    printf("matmul_base:\t\t%4f\t%4f \t\t%g\n", elapsed_base * 1.0e3,
           ((((2.0 * N) * N) * N) / (1.0e6 * elapsed_base)),
           maxerror(N, N, C_base, C_base));
    printf("matmul_openmp:\t\t%4f\t%4f \t\t%g\n", elapsed_openmp * 1.0e3,
           ((((2.0 * N) * N) * N) / (1.0e6 * elapsed_openmp)),
           maxerror(N, N, C_base, C_openmp));
    /* put other printf statements for outputing results for GPU execution */

    free(heap_buffer);
    return 0;
}

/* Sequential reference: C = A * B for N x N row-major matrices. */
void matmul_base(int N, REAL *A, REAL * B, REAL *C) {
    int i, j, k;
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            REAL temp = 0.0;
            for (k = 0; k < N; k++) {
                temp += A[i * N + k] * B[k * N + j];
            }
            C[i * N + j] = temp;
        }
    }
}

/* OpenMP variant: rows of C are divided among num_tasks threads. */
void matmul_openmp(int N, REAL *A, REAL *B, REAL *C, int num_tasks) {
    int i, j, k;
#pragma omp parallel for shared(N,A,B,C,num_tasks) private(i,j,k) num_threads(num_tasks)
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            REAL temp = 0.0;
            for (k = 0; k < N; k++) {
                temp += A[i * N + k] * B[k * N + j];
            }
            C[i * N + j] = temp;
        }
    }
}

/*
 * call to kernel that uses GPU global memory
 * TODO: allocate device buffers, copy A/B, launch kernel, copy C back.
 */
void matmul_cuda_v1_vanilla(int N, REAL *A, REAL *B, REAL *C) {
}

/*
 * call to kernel that use GPU shared memory
 */
void matmul_cuda_v1_shmem(int N, REAL *A, REAL *B, REAL *C) {
}

/*
 * call to sgemm of cublas library
 */
void matmul_cuda_v1_cublas(int N, REAL *A, REAL *B, REAL *C) {
}
/*
 * Rectangular matrix multiplication (HIP port)
 *   A[M][K] * B[K][N] = C[M][N]
 *
 * Sequential reference (matmul_base), OpenMP variant (matmul_openmp),
 * and stubs for three GPU versions (global memory, shared memory, BLAS).
 */
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/timeb.h>
#include <string.h>

/* read timer in second */
double read_timer() {
    struct timeb tm;
    ftime(&tm);
    return (double) tm.time + (double) tm.millitm / 1000.0;
}

/* read timer in ms */
double read_timer_ms() {
    struct timeb tm;
    ftime(&tm);
    return (double) tm.time * 1000.0 + (double) tm.millitm;
}

#define REAL float

/*
 * Fill the M x N row-major matrix A with pseudo-random values from drand48().
 * Call srand48() beforehand for a reproducible sequence.
 */
void init(int M, int N, REAL * A) {
    int i, j;
    for (i = 0; i < M; i++) {
        for (j = 0; j < N; j++) {
            A[i * N + j] = (REAL) drand48();
        }
    }
}

/*
 * Maximum element-wise relative error |(A[i][j]-B[i][j]) / A[i][j]| over two
 * M x N row-major matrices. Assumes A has no exactly-zero entries (true here:
 * drand48() yields values in [0,1) and a zero draw is practically impossible).
 */
double maxerror(int M, int N, REAL * A, REAL *B) {
    int i, j;
    double error = 0.0;
    for (i = 0; i < M; i++) {
        for (j = 0; j < N; j++) {
            double diff = (A[i * N + j] - B[i * N + j]) / A[i * N + j];
            if (diff < 0) diff = -diff;
            if (diff > error) error = diff;
        }
    }
    return error;
}

void matmul_base(int N, REAL *A, REAL * B, REAL *C);
void matmul_openmp(int N, REAL *A, REAL *B, REAL *C, int num_tasks);
void matmul_cuda_v1_vanilla(int N, REAL *A, REAL *B, REAL *C);
void matmul_cuda_v1_shmem(int N, REAL *A, REAL *B, REAL *C);
void matmul_cuda_v1_cublas(int N, REAL *A, REAL *B, REAL *C);

int main(int argc, char *argv[]) {
    int N;
    int num_tasks = 5; /* 5 is default number of tasks */
    double elapsed_base, elapsed_openmp; /* for timing */
    if (argc < 2) {
        fprintf(stderr, "Usage: matmul <n> [<#tasks(%d)>]\n", num_tasks);
        exit(1);
    }
    N = atoi(argv[1]);
    if (argc > 2) num_tasks = atoi(argv[2]);

    /* One heap buffer holding the 4 matrices used in this example
     * (A, B, C_base, C_openmp). size_t arithmetic avoids the int
     * overflow of N*N that the index expressions would hit for large N. */
    size_t n2 = (size_t) N * (size_t) N;
    REAL * heap_buffer = (REAL*) malloc(sizeof(REAL) * n2 * 4);
    if (heap_buffer == NULL) {
        fprintf(stderr, "cannot allocate matrices for N=%d\n", N);
        exit(1);
    }
    /* below is a cast from memory buffer to a 2-d row-major array */
    REAL *A = heap_buffer;
    REAL *B = &heap_buffer[n2];
    REAL *C_base = &heap_buffer[2 * n2];
    REAL *C_openmp = &heap_buffer[3 * n2];

    srand48((1 << 12));
    init(N, N, A);
    init(N, N, B);

    /* example run */
    elapsed_base = read_timer();
    matmul_base(N, A, B, C_base);
    elapsed_base = (read_timer() - elapsed_base);

    elapsed_openmp = read_timer();
    matmul_openmp(N, A, B, C_openmp, num_tasks);
    elapsed_openmp = (read_timer() - elapsed_openmp);

    /* call and timing for the three CUDA versions */
    /* there are three devices you can use on gpu.secs.oakland.edu, 0, 2, 3.
     * 1 is a graphical card with less computation capability. */
    hipSetDevice(0);
    //call and time for matmul_cuda_v1_vanilla(int N, REAL *A, REAL *B, REAL *C);
    //call and time for matmul_cuda_v1_shmem(int N, REAL *A, REAL *B, REAL *C);
    //call and time for matmul_cuda_v1_cublas(int N, REAL *A, REAL *B, REAL *C);

    printf("======================================================================================================\n");
    printf("Matrix Multiplication: A[M][K] * B[k][N] = C[M][N], M=K=N=%d, %d threads/tasks\n", N, num_tasks);
    printf("------------------------------------------------------------------------------------------------------\n");
    printf("Performance:\t\tRuntime (ms)\t MFLOPS \t\tError (compared to base)\n");
    printf("------------------------------------------------------------------------------------------------------\n");
    /* maxerror(C_base, C_base) is identically 0 — printed as the baseline row. */
    printf("matmul_base:\t\t%4f\t%4f \t\t%g\n", elapsed_base * 1.0e3,
           ((((2.0 * N) * N) * N) / (1.0e6 * elapsed_base)),
           maxerror(N, N, C_base, C_base));
    printf("matmul_openmp:\t\t%4f\t%4f \t\t%g\n", elapsed_openmp * 1.0e3,
           ((((2.0 * N) * N) * N) / (1.0e6 * elapsed_openmp)),
           maxerror(N, N, C_base, C_openmp));
    /* put other printf statements for outputing results for GPU execution */

    free(heap_buffer);
    return 0;
}

/* Sequential reference: C = A * B for N x N row-major matrices. */
void matmul_base(int N, REAL *A, REAL * B, REAL *C) {
    int i, j, k;
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            REAL temp = 0.0;
            for (k = 0; k < N; k++) {
                temp += A[i * N + k] * B[k * N + j];
            }
            C[i * N + j] = temp;
        }
    }
}

/* OpenMP variant: rows of C are divided among num_tasks threads. */
void matmul_openmp(int N, REAL *A, REAL *B, REAL *C, int num_tasks) {
    int i, j, k;
#pragma omp parallel for shared(N,A,B,C,num_tasks) private(i,j,k) num_threads(num_tasks)
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            REAL temp = 0.0;
            for (k = 0; k < N; k++) {
                temp += A[i * N + k] * B[k * N + j];
            }
            C[i * N + j] = temp;
        }
    }
}

/*
 * call to kernel that uses GPU global memory
 * TODO: allocate device buffers, copy A/B, launch kernel, copy C back.
 */
void matmul_cuda_v1_vanilla(int N, REAL *A, REAL *B, REAL *C) {
}

/*
 * call to kernel that use GPU shared memory
 */
void matmul_cuda_v1_shmem(int N, REAL *A, REAL *B, REAL *C) {
}

/*
 * call to sgemm of cublas library
 */
void matmul_cuda_v1_cublas(int N, REAL *A, REAL *B, REAL *C) {
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
/*
 * Rectangular matrix multiplication (HIP port)
 *   A[M][K] * B[K][N] = C[M][N]
 *
 * Sequential reference (matmul_base), OpenMP variant (matmul_openmp),
 * and stubs for three GPU versions (global memory, shared memory, BLAS).
 */
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/timeb.h>
#include <string.h>

/* read timer in second */
double read_timer() {
    struct timeb tm;
    ftime(&tm);
    return (double) tm.time + (double) tm.millitm / 1000.0;
}

/* read timer in ms */
double read_timer_ms() {
    struct timeb tm;
    ftime(&tm);
    return (double) tm.time * 1000.0 + (double) tm.millitm;
}

#define REAL float

/*
 * Fill the M x N row-major matrix A with pseudo-random values from drand48().
 * Call srand48() beforehand for a reproducible sequence.
 */
void init(int M, int N, REAL * A) {
    int i, j;
    for (i = 0; i < M; i++) {
        for (j = 0; j < N; j++) {
            A[i * N + j] = (REAL) drand48();
        }
    }
}

/*
 * Maximum element-wise relative error |(A[i][j]-B[i][j]) / A[i][j]| over two
 * M x N row-major matrices. Assumes A has no exactly-zero entries (true here:
 * drand48() yields values in [0,1) and a zero draw is practically impossible).
 */
double maxerror(int M, int N, REAL * A, REAL *B) {
    int i, j;
    double error = 0.0;
    for (i = 0; i < M; i++) {
        for (j = 0; j < N; j++) {
            double diff = (A[i * N + j] - B[i * N + j]) / A[i * N + j];
            if (diff < 0) diff = -diff;
            if (diff > error) error = diff;
        }
    }
    return error;
}

void matmul_base(int N, REAL *A, REAL * B, REAL *C);
void matmul_openmp(int N, REAL *A, REAL *B, REAL *C, int num_tasks);
void matmul_cuda_v1_vanilla(int N, REAL *A, REAL *B, REAL *C);
void matmul_cuda_v1_shmem(int N, REAL *A, REAL *B, REAL *C);
void matmul_cuda_v1_cublas(int N, REAL *A, REAL *B, REAL *C);

int main(int argc, char *argv[]) {
    int N;
    int num_tasks = 5; /* 5 is default number of tasks */
    double elapsed_base, elapsed_openmp; /* for timing */
    if (argc < 2) {
        fprintf(stderr, "Usage: matmul <n> [<#tasks(%d)>]\n", num_tasks);
        exit(1);
    }
    N = atoi(argv[1]);
    if (argc > 2) num_tasks = atoi(argv[2]);

    /* One heap buffer holding the 4 matrices used in this example
     * (A, B, C_base, C_openmp). size_t arithmetic avoids the int
     * overflow of N*N that the index expressions would hit for large N. */
    size_t n2 = (size_t) N * (size_t) N;
    REAL * heap_buffer = (REAL*) malloc(sizeof(REAL) * n2 * 4);
    if (heap_buffer == NULL) {
        fprintf(stderr, "cannot allocate matrices for N=%d\n", N);
        exit(1);
    }
    /* below is a cast from memory buffer to a 2-d row-major array */
    REAL *A = heap_buffer;
    REAL *B = &heap_buffer[n2];
    REAL *C_base = &heap_buffer[2 * n2];
    REAL *C_openmp = &heap_buffer[3 * n2];

    srand48((1 << 12));
    init(N, N, A);
    init(N, N, B);

    /* example run */
    elapsed_base = read_timer();
    matmul_base(N, A, B, C_base);
    elapsed_base = (read_timer() - elapsed_base);

    elapsed_openmp = read_timer();
    matmul_openmp(N, A, B, C_openmp, num_tasks);
    elapsed_openmp = (read_timer() - elapsed_openmp);

    /* call and timing for the three CUDA versions */
    /* there are three devices you can use on gpu.secs.oakland.edu, 0, 2, 3.
     * 1 is a graphical card with less computation capability. */
    hipSetDevice(0);
    //call and time for matmul_cuda_v1_vanilla(int N, REAL *A, REAL *B, REAL *C);
    //call and time for matmul_cuda_v1_shmem(int N, REAL *A, REAL *B, REAL *C);
    //call and time for matmul_cuda_v1_cublas(int N, REAL *A, REAL *B, REAL *C);

    printf("======================================================================================================\n");
    printf("Matrix Multiplication: A[M][K] * B[k][N] = C[M][N], M=K=N=%d, %d threads/tasks\n", N, num_tasks);
    printf("------------------------------------------------------------------------------------------------------\n");
    printf("Performance:\t\tRuntime (ms)\t MFLOPS \t\tError (compared to base)\n");
    printf("------------------------------------------------------------------------------------------------------\n");
    /* maxerror(C_base, C_base) is identically 0 — printed as the baseline row. */
    printf("matmul_base:\t\t%4f\t%4f \t\t%g\n", elapsed_base * 1.0e3,
           ((((2.0 * N) * N) * N) / (1.0e6 * elapsed_base)),
           maxerror(N, N, C_base, C_base));
    printf("matmul_openmp:\t\t%4f\t%4f \t\t%g\n", elapsed_openmp * 1.0e3,
           ((((2.0 * N) * N) * N) / (1.0e6 * elapsed_openmp)),
           maxerror(N, N, C_base, C_openmp));
    /* put other printf statements for outputing results for GPU execution */

    free(heap_buffer);
    return 0;
}

/* Sequential reference: C = A * B for N x N row-major matrices. */
void matmul_base(int N, REAL *A, REAL * B, REAL *C) {
    int i, j, k;
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            REAL temp = 0.0;
            for (k = 0; k < N; k++) {
                temp += A[i * N + k] * B[k * N + j];
            }
            C[i * N + j] = temp;
        }
    }
}

/* OpenMP variant: rows of C are divided among num_tasks threads. */
void matmul_openmp(int N, REAL *A, REAL *B, REAL *C, int num_tasks) {
    int i, j, k;
#pragma omp parallel for shared(N,A,B,C,num_tasks) private(i,j,k) num_threads(num_tasks)
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            REAL temp = 0.0;
            for (k = 0; k < N; k++) {
                temp += A[i * N + k] * B[k * N + j];
            }
            C[i * N + j] = temp;
        }
    }
}

/*
 * call to kernel that uses GPU global memory
 * TODO: allocate device buffers, copy A/B, launch kernel, copy C back.
 */
void matmul_cuda_v1_vanilla(int N, REAL *A, REAL *B, REAL *C) {
}

/*
 * call to kernel that use GPU shared memory
 */
void matmul_cuda_v1_shmem(int N, REAL *A, REAL *B, REAL *C) {
}

/*
 * call to sgemm of cublas library
 */
void matmul_cuda_v1_cublas(int N, REAL *A, REAL *B, REAL *C) {
}
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
/* * Rectangular matrix multiplication * A[M][K] * B[k][N] = C[M][N] * */ #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <sys/timeb.h> #include <string.h> /* read timer in second */ double read_timer() { struct timeb tm; ftime(&tm); return (double) tm.time + (double) tm.millitm / 1000.0; } /* read timer in ms */ double read_timer_ms() { struct timeb tm; ftime(&tm); return (double) tm.time * 1000.0 + (double) tm.millitm; } #define REAL float void init(int M, int N, REAL * A) { int i, j; for (i = 0; i < M; i++) { for (j = 0; j < N; j++) { A[i*N+j] = (REAL) drand48(); } } } double maxerror(int M, int N, REAL * A, REAL *B) { int i, j; double error = 0.0; for (i = 0; i < M; i++) { for (j = 0; j < N; j++) { double diff = (A[i*N+j] - B[i*N+j]) / A[i*N+j]; if (diff < 0) diff = -diff; if (diff > error) error = diff; } } return error; } void matmul_base(int N, REAL *A, REAL * B, REAL *C); void matmul_openmp(int N, REAL *A, REAL *B, REAL *C, int num_tasks); void matmul_cuda_v1_vanilla(int N, REAL *A, REAL *B, REAL *C); void matmul_cuda_v1_shmem(int N, REAL *A, REAL *B, REAL *C); void matmul_cuda_v1_cublas(int N, REAL *A, REAL *B, REAL *C); int main(int argc, char *argv[]) { int N; int num_tasks = 5; /* 5 is default number of tasks */ double elapsed_base, elapsed_openmp, elapsed_cuda_v1, elapsed_cuda_v2, elapsed_cuda_v3; /* for timing */ if (argc < 2) { fprintf(stderr, "Usage: matmul <n> [<#tasks(%d)>]\n", num_tasks); exit(1); } N = atoi(argv[1]); if (argc > 2) num_tasks = atoi(argv[2]); REAL * heap_buffer = (REAL*)malloc(sizeof(REAL)*N*N*4); /* we use 5 matrix in this example */ /* below is a cast from memory buffer to a 2-d row-major array */ REAL *A = heap_buffer; REAL *B = &heap_buffer[N*N]; REAL *C_base = &heap_buffer[2*N*N]; REAL *C_openmp = &heap_buffer[3*N*N]; srand48((1 << 12)); init(N, N, A); init(N, N, B); /* example run */ elapsed_base = read_timer(); matmul_base(N, A, B, C_base); elapsed_base = (read_timer() - 
elapsed_base); elapsed_openmp = read_timer(); matmul_openmp(N, A, B, C_openmp, num_tasks); elapsed_openmp = (read_timer() - elapsed_openmp); /* call and timing for the three CUDA versions */ /* there are three devices you can use on gpu.secs.oakland.edu, 0, 2, 3. * 1 is a graphical card with less computation capability. */ hipSetDevice(0); //call and time for matmul_cuda_v1_vanilla(int N, REAL *A, REAL *B, REAL *C); //call and time for matmul_cuda_v1_shmem(int N, REAL *A, REAL *B, REAL *C); //call and time for matmul_cuda_v1_cublas(int N, REAL *A, REAL *B, REAL *C); printf("======================================================================================================\n"); printf("Matrix Multiplication: A[M][K] * B[k][N] = C[M][N], M=K=N=%d, %d threads/tasks\n", N, num_tasks); printf("------------------------------------------------------------------------------------------------------\n"); printf("Performance:\t\tRuntime (ms)\t MFLOPS \t\tError (compared to base)\n"); printf("------------------------------------------------------------------------------------------------------\n"); printf("matmul_base:\t\t%4f\t%4f \t\t%g\n", elapsed_base * 1.0e3, ((((2.0 * N) * N) * N) / (1.0e6 * elapsed_base)), maxerror(N, N, C_base, C_base)); printf("matmul_openmp:\t\t%4f\t%4f \t\t%g\n", elapsed_openmp * 1.0e3, ((((2.0 * N) * N) * N) / (1.0e6 * elapsed_openmp)), maxerror(N, N, C_base, C_openmp)); /* put other printf statements for outputing results for GPU execution */ free(heap_buffer); return 0; } void matmul_base(int N, REAL *A, REAL * B, REAL *C) { int i, j, k; for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { REAL temp = 0.0; for (k = 0; k < N; k++) { temp += A[i*N+k] * B[k*N+j]; } C[i*N+j] = temp; } } } void matmul_openmp(int N, REAL *A, REAL *B, REAL *C, int num_tasks) { int i, j, k; #pragma omp parallel for shared(N,A,B,C,num_tasks) private(i,j,k) num_threads(num_tasks) for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { REAL temp = 0.0; for (k = 0; k < N; k++) 
{ temp += A[i*N+k] * B[k*N+j]; } C[i*N+j] = temp; } } } /* * call to kernel that uses GPU global memory */ void matmul_cuda_v1_vanilla(int N, REAL *A, REAL *B, REAL *C) { } /* * call to kernel that use GPU shared memory */ void matmul_cuda_v1_shmem(int N, REAL *A, REAL *B, REAL *C) { } /* * call to sgemm of cublas library */ void matmul_cuda_v1_cublas(int N, REAL *A, REAL *B, REAL *C) { }
.text .file "matmul.hip" .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function _Z10read_timerv .LCPI0_0: .quad 0x408f400000000000 # double 1000 .text .globl _Z10read_timerv .p2align 4, 0x90 .type _Z10read_timerv,@function _Z10read_timerv: # @_Z10read_timerv .cfi_startproc # %bb.0: subq $24, %rsp .cfi_def_cfa_offset 32 leaq 8(%rsp), %rdi callq ftime cvtsi2sdq 8(%rsp), %xmm1 movzwl 16(%rsp), %eax cvtsi2sd %eax, %xmm0 divsd .LCPI0_0(%rip), %xmm0 addsd %xmm1, %xmm0 addq $24, %rsp .cfi_def_cfa_offset 8 retq .Lfunc_end0: .size _Z10read_timerv, .Lfunc_end0-_Z10read_timerv .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function _Z13read_timer_msv .LCPI1_0: .quad 0x408f400000000000 # double 1000 .text .globl _Z13read_timer_msv .p2align 4, 0x90 .type _Z13read_timer_msv,@function _Z13read_timer_msv: # @_Z13read_timer_msv .cfi_startproc # %bb.0: subq $24, %rsp .cfi_def_cfa_offset 32 leaq 8(%rsp), %rdi callq ftime cvtsi2sdq 8(%rsp), %xmm1 mulsd .LCPI1_0(%rip), %xmm1 movzwl 16(%rsp), %eax cvtsi2sd %eax, %xmm0 addsd %xmm1, %xmm0 addq $24, %rsp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size _Z13read_timer_msv, .Lfunc_end1-_Z13read_timer_msv .cfi_endproc # -- End function .globl _Z4initiiPf # -- Begin function _Z4initiiPf .p2align 4, 0x90 .type _Z4initiiPf,@function _Z4initiiPf: # @_Z4initiiPf .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $24, %rsp .cfi_def_cfa_offset 80 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movq %rdx, 8(%rsp) # 8-byte Spill testl %edi, %edi jle .LBB2_6 # %bb.1: # %.preheader.lr.ph movl %esi, %ebp movl %edi, %eax movq %rax, 16(%rsp) # 8-byte Spill movl %esi, %r15d xorl %r12d, %r12d xorl %r13d, %r13d jmp .LBB2_2 .p2align 
4, 0x90 .LBB2_5: # %._crit_edge # in Loop: Header=BB2_2 Depth=1 incq %r13 addl %ebp, %r12d cmpq 16(%rsp), %r13 # 8-byte Folded Reload je .LBB2_6 .LBB2_2: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB2_4 Depth 2 testl %ebp, %ebp jle .LBB2_5 # %bb.3: # %.lr.ph # in Loop: Header=BB2_2 Depth=1 movl %r12d, %eax movq 8(%rsp), %rcx # 8-byte Reload leaq (%rcx,%rax,4), %rbx xorl %r14d, %r14d .p2align 4, 0x90 .LBB2_4: # Parent Loop BB2_2 Depth=1 # => This Inner Loop Header: Depth=2 callq drand48 cvtsd2ss %xmm0, %xmm0 movss %xmm0, (%rbx,%r14,4) incq %r14 cmpq %r14, %r15 jne .LBB2_4 jmp .LBB2_5 .LBB2_6: # %._crit_edge13 addq $24, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end2: .size _Z4initiiPf, .Lfunc_end2-_Z4initiiPf .cfi_endproc # -- End function .section .rodata.cst16,"aM",@progbits,16 .p2align 4, 0x0 # -- Begin function _Z8maxerroriiPfS_ .LCPI3_0: .quad 0x8000000000000000 # double -0 .quad 0x8000000000000000 # double -0 .text .globl _Z8maxerroriiPfS_ .p2align 4, 0x90 .type _Z8maxerroriiPfS_,@function _Z8maxerroriiPfS_: # @_Z8maxerroriiPfS_ .cfi_startproc # %bb.0: testl %edi, %edi jle .LBB3_1 # %bb.3: # %.preheader.lr.ph pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset %rbx, -16 movl %edi, %eax movl %esi, %edi xorpd %xmm3, %xmm3 xorl %r8d, %r8d xorps %xmm1, %xmm1 movaps .LCPI3_0(%rip), %xmm2 # xmm2 = [-0.0E+0,-0.0E+0] xorl %r9d, %r9d jmp .LBB3_4 .p2align 4, 0x90 .LBB3_5: # in Loop: Header=BB3_4 Depth=1 movapd %xmm3, %xmm0 .LBB3_10: # %._crit_edge # in Loop: Header=BB3_4 Depth=1 incq %r9 addl %esi, %r8d movapd %xmm0, %xmm3 cmpq %rax, %r9 je .LBB3_11 .LBB3_4: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB3_7 Depth 2 testl %esi, %esi jle .LBB3_5 # %bb.6: # %.lr.ph # in Loop: Header=BB3_4 Depth=1 movl %r8d, %r11d leaq (%rcx,%r11,4), %r10 leaq (%rdx,%r11,4), %r11 
xorl %ebx, %ebx jmp .LBB3_7 .p2align 4, 0x90 .LBB3_9: # in Loop: Header=BB3_7 Depth=2 maxsd %xmm3, %xmm0 incq %rbx movapd %xmm0, %xmm3 cmpq %rbx, %rdi je .LBB3_10 .LBB3_7: # Parent Loop BB3_4 Depth=1 # => This Inner Loop Header: Depth=2 movss (%r11,%rbx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero movaps %xmm0, %xmm4 subss (%r10,%rbx,4), %xmm4 divss %xmm0, %xmm4 xorps %xmm0, %xmm0 cvtss2sd %xmm4, %xmm0 ucomiss %xmm4, %xmm1 jbe .LBB3_9 # %bb.8: # in Loop: Header=BB3_7 Depth=2 xorps %xmm2, %xmm0 jmp .LBB3_9 .LBB3_11: popq %rbx .cfi_def_cfa_offset 8 .cfi_restore %rbx retq .LBB3_1: xorps %xmm0, %xmm0 retq .Lfunc_end3: .size _Z8maxerroriiPfS_, .Lfunc_end3-_Z8maxerroriiPfS_ .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function main .LCPI4_0: .quad 0x408f400000000000 # double 1000 .LCPI4_1: .quad 0x412e848000000000 # double 1.0E+6 .section .rodata.cst16,"aM",@progbits,16 .p2align 4, 0x0 .LCPI4_2: .quad 0x8000000000000000 # double -0 .quad 0x8000000000000000 # double -0 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $72, %rsp .cfi_def_cfa_offset 128 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 cmpl $1, %edi jle .LBB4_44 # %bb.1: movq %rsi, %rbx movl %edi, %r12d movq 8(%rsi), %rdi xorl %esi, %esi movl $10, %edx callq __isoc23_strtol movq %rax, %r14 movq %rax, (%rsp) # 8-byte Spill movl %r14d, %ebp movl $5, %eax cmpl $2, %r12d je .LBB4_3 # %bb.2: movq 16(%rbx), %rdi xorl %esi, %esi movl $10, %edx callq __isoc23_strtol .LBB4_3: movq %rax, 64(%rsp) # 8-byte Spill movslq (%rsp), %rdi # 4-byte Folded Reload shlq $2, %rdi imulq %rdi, %rdi callq malloc movq %rax, 56(%rsp) # 8-byte Spill imulq 
%r14, %r14 movq %r14, 24(%rsp) # 8-byte Spill movl %r14d, %eax movq %rax, 16(%rsp) # 8-byte Spill movl %ebp, %eax imull %ebp, %eax addl %eax, %eax cltq movq %rax, 8(%rsp) # 8-byte Spill movl $4096, %edi # imm = 0x1000 callq srand48 testl %ebp, %ebp jle .LBB4_13 # %bb.4: # %.preheader.lr.ph.i movl (%rsp), %r13d # 4-byte Reload xorl %r12d, %r12d xorl %r14d, %r14d .p2align 4, 0x90 .LBB4_5: # %.preheader.i # =>This Loop Header: Depth=1 # Child Loop BB4_6 Depth 2 movl %r12d, %eax movq 56(%rsp), %rcx # 8-byte Reload leaq (%rcx,%rax,4), %r15 xorl %ebx, %ebx .p2align 4, 0x90 .LBB4_6: # Parent Loop BB4_5 Depth=1 # => This Inner Loop Header: Depth=2 callq drand48 cvtsd2ss %xmm0, %xmm0 movss %xmm0, (%r15,%rbx,4) incq %rbx cmpq %rbx, %r13 jne .LBB4_6 # %bb.7: # %._crit_edge.i # in Loop: Header=BB4_5 Depth=1 incq %r14 addl %ebp, %r12d cmpq %r13, %r14 jne .LBB4_5 # %bb.8: # %_Z4initiiPf.exit testl %ebp, %ebp jle .LBB4_13 # %bb.9: # %.preheader.lr.ph.i58 movl (%rsp), %r13d # 4-byte Reload movq 56(%rsp), %rax # 8-byte Reload movq 16(%rsp), %rcx # 8-byte Reload leaq (%rax,%rcx,4), %rax movq %rax, 48(%rsp) # 8-byte Spill xorl %r14d, %r14d xorl %r15d, %r15d .p2align 4, 0x90 .LBB4_10: # %.preheader.i60 # =>This Loop Header: Depth=1 # Child Loop BB4_11 Depth 2 movl %r14d, %eax movq 48(%rsp), %rcx # 8-byte Reload leaq (%rcx,%rax,4), %rbx xorl %r12d, %r12d .p2align 4, 0x90 .LBB4_11: # Parent Loop BB4_10 Depth=1 # => This Inner Loop Header: Depth=2 callq drand48 cvtsd2ss %xmm0, %xmm0 movss %xmm0, (%rbx,%r12,4) incq %r12 cmpq %r12, %r13 jne .LBB4_11 # %bb.12: # %._crit_edge.i62 # in Loop: Header=BB4_10 Depth=1 incq %r15 addl %ebp, %r14d cmpq %r13, %r15 jne .LBB4_10 .LBB4_13: # %_Z4initiiPf.exit69 movq 24(%rsp), %r13 # 8-byte Reload shlq $32, %r13 movq 56(%rsp), %rbx # 8-byte Reload movq 16(%rsp), %rax # 8-byte Reload leaq (%rbx,%rax,4), %r14 movq 8(%rsp), %rax # 8-byte Reload leaq (%rbx,%rax,4), %r12 leaq 32(%rsp), %rdi callq ftime xorps %xmm0, %xmm0 cvtsi2sdq 32(%rsp), %xmm0 movzwl 
40(%rsp), %eax cvtsi2sd %eax, %xmm1 divsd .LCPI4_0(%rip), %xmm1 addsd %xmm0, %xmm1 movsd %xmm1, 16(%rsp) # 8-byte Spill testl %ebp, %ebp jle .LBB4_20 # %bb.14: # %.preheader26.lr.ph.i movl (%rsp), %eax # 4-byte Reload leaq (,%rax,4), %rcx xorl %edx, %edx xorl %esi, %esi .p2align 4, 0x90 .LBB4_15: # %.preheader26.i # =>This Loop Header: Depth=1 # Child Loop BB4_16 Depth 2 # Child Loop BB4_17 Depth 3 movl %edx, %edi leaq (%rbx,%rdi,4), %rdi movq %rsi, %r8 imulq %rax, %r8 leaq (%r12,%r8,4), %r8 movq %r14, %r9 xorl %r10d, %r10d .p2align 4, 0x90 .LBB4_16: # %.preheader.i70 # Parent Loop BB4_15 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB4_17 Depth 3 xorpd %xmm0, %xmm0 movq %r9, %r11 xorl %r15d, %r15d .p2align 4, 0x90 .LBB4_17: # Parent Loop BB4_15 Depth=1 # Parent Loop BB4_16 Depth=2 # => This Inner Loop Header: Depth=3 movss (%rdi,%r15), %xmm1 # xmm1 = mem[0],zero,zero,zero mulss (%r11), %xmm1 addss %xmm1, %xmm0 addq $4, %r15 addq %rcx, %r11 cmpq %r15, %rcx jne .LBB4_17 # %bb.18: # %._crit_edge.i75 # in Loop: Header=BB4_16 Depth=2 movss %xmm0, (%r8,%r10,4) incq %r10 addq $4, %r9 cmpq %rax, %r10 jne .LBB4_16 # %bb.19: # %._crit_edge30.i # in Loop: Header=BB4_15 Depth=1 incq %rsi addl %ebp, %edx cmpq %rax, %rsi jne .LBB4_15 .LBB4_20: # %_Z11matmul_baseiPfS_S_.exit leaq (,%r13,2), %rax addq %r13, %rax movq %rax, 48(%rsp) # 8-byte Spill leaq 32(%rsp), %rdi callq ftime xorps %xmm0, %xmm0 cvtsi2sdq 32(%rsp), %xmm0 movsd %xmm0, 24(%rsp) # 8-byte Spill movzwl 40(%rsp), %ebx leaq 32(%rsp), %rdi callq ftime xorps %xmm0, %xmm0 cvtsi2sd %ebx, %xmm0 movq 56(%rsp), %rbx # 8-byte Reload divsd .LCPI4_0(%rip), %xmm0 addsd 24(%rsp), %xmm0 # 8-byte Folded Reload subsd 16(%rsp), %xmm0 # 8-byte Folded Reload movsd %xmm0, 8(%rsp) # 8-byte Spill testl %ebp, %ebp jle .LBB4_27 # %bb.21: # %.preheader26.lr.ph.i76 movq 48(%rsp), %rax # 8-byte Reload sarq $30, %rax addq %rbx, %rax movl (%rsp), %ecx # 4-byte Reload leaq (,%rcx,4), %rdx xorl %esi, %esi xorl %edi, %edi .p2align 4, 0x90 
.LBB4_22: # %.preheader26.i78 # =>This Loop Header: Depth=1 # Child Loop BB4_23 Depth 2 # Child Loop BB4_24 Depth 3 movl %esi, %r8d leaq (%rbx,%r8,4), %r8 movq %rdi, %r9 imulq %rcx, %r9 leaq (%rax,%r9,4), %r9 movq %r14, %r10 xorl %r11d, %r11d .p2align 4, 0x90 .LBB4_23: # %.preheader.i81 # Parent Loop BB4_22 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB4_24 Depth 3 xorpd %xmm0, %xmm0 movq %r10, %r13 xorl %r15d, %r15d .p2align 4, 0x90 .LBB4_24: # Parent Loop BB4_22 Depth=1 # Parent Loop BB4_23 Depth=2 # => This Inner Loop Header: Depth=3 movss (%r8,%r15), %xmm1 # xmm1 = mem[0],zero,zero,zero mulss (%r13), %xmm1 addss %xmm1, %xmm0 addq $4, %r15 addq %rdx, %r13 cmpq %r15, %rdx jne .LBB4_24 # %bb.25: # %._crit_edge.i88 # in Loop: Header=BB4_23 Depth=2 movss %xmm0, (%r9,%r11,4) incq %r11 addq $4, %r10 cmpq %rcx, %r11 jne .LBB4_23 # %bb.26: # %._crit_edge30.i91 # in Loop: Header=BB4_22 Depth=1 incq %rdi addl %ebp, %esi cmpq %rcx, %rdi jne .LBB4_22 .LBB4_27: # %_Z13matmul_openmpiPfS_S_i.exit movq 32(%rsp), %r13 movzwl 40(%rsp), %r15d leaq 32(%rsp), %rdi callq ftime movq 32(%rsp), %rax movq %rax, 16(%rsp) # 8-byte Spill movzwl 40(%rsp), %eax movw %ax, 24(%rsp) # 2-byte Spill xorl %r14d, %r14d xorl %edi, %edi callq hipSetDevice movl $.Lstr, %edi callq puts@PLT movl $.L.str.2, %edi movl %ebp, %esi movq 64(%rsp), %rdx # 8-byte Reload # kill: def $edx killed $edx killed $rdx xorl %eax, %eax callq printf movl $.Lstr.3, %edi callq puts@PLT movl $.Lstr.2, %edi callq puts@PLT movl $.Lstr.3, %edi callq puts@PLT movsd .LCPI4_0(%rip), %xmm0 # xmm0 = mem[0],zero movsd 8(%rsp), %xmm3 # 8-byte Reload # xmm3 = mem[0],zero mulsd %xmm3, %xmm0 xorps %xmm1, %xmm1 cvtsi2sd %ebp, %xmm1 movapd %xmm1, %xmm2 addsd %xmm1, %xmm2 mulsd %xmm1, %xmm2 mulsd .LCPI4_1(%rip), %xmm3 mulsd %xmm1, %xmm2 movsd %xmm2, 64(%rsp) # 8-byte Spill movapd %xmm2, %xmm1 divsd %xmm3, %xmm1 testl %ebp, %ebp jle .LBB4_42 # %bb.28: # %.preheader.lr.ph.i94 movq %r13, 8(%rsp) # 8-byte Spill movl %r15d, %r13d movq 
48(%rsp), %r15 # 8-byte Reload sarq $32, %r15 movl (%rsp), %eax # 4-byte Reload xorpd %xmm2, %xmm2 xorpd %xmm3, %xmm3 xorl %ecx, %ecx jmp .LBB4_29 .p2align 4, 0x90 .LBB4_33: # %._crit_edge.i96 # in Loop: Header=BB4_29 Depth=1 incq %rcx addl %ebp, %r14d cmpq %rax, %rcx je .LBB4_34 .LBB4_29: # %.preheader.i95 # =>This Loop Header: Depth=1 # Child Loop BB4_30 Depth 2 movl %r14d, %edx leaq (%r12,%rdx,4), %rdx xorl %esi, %esi movapd %xmm2, %xmm4 jmp .LBB4_30 .p2align 4, 0x90 .LBB4_32: # in Loop: Header=BB4_30 Depth=2 maxsd %xmm4, %xmm2 incq %rsi movapd %xmm2, %xmm4 cmpq %rsi, %rax je .LBB4_33 .LBB4_30: # Parent Loop BB4_29 Depth=1 # => This Inner Loop Header: Depth=2 movss (%rdx,%rsi,4), %xmm2 # xmm2 = mem[0],zero,zero,zero movaps %xmm2, %xmm5 subss %xmm2, %xmm5 divss %xmm2, %xmm5 xorps %xmm2, %xmm2 cvtss2sd %xmm5, %xmm2 ucomiss %xmm5, %xmm3 jbe .LBB4_32 # %bb.31: # in Loop: Header=BB4_30 Depth=2 xorps .LCPI4_2(%rip), %xmm2 jmp .LBB4_32 .LBB4_34: # %_Z8maxerroriiPfS_.exit movl $.L.str.5, %edi movb $3, %al callq printf testl %ebp, %ebp jle .LBB4_35 # %bb.36: # %.preheader.lr.ph.i102 movl (%rsp), %eax # 4-byte Reload leaq (%rbx,%r15,4), %rcx xorpd %xmm2, %xmm2 xorl %edx, %edx xorpd %xmm0, %xmm0 xorl %esi, %esi movl %r13d, %r15d movq 8(%rsp), %r13 # 8-byte Reload movaps .LCPI4_2(%rip), %xmm4 # xmm4 = [-0.0E+0,-0.0E+0] jmp .LBB4_37 .p2align 4, 0x90 .LBB4_41: # %._crit_edge.i107 # in Loop: Header=BB4_37 Depth=1 incq %rsi addl %ebp, %edx cmpq %rax, %rsi je .LBB4_43 .LBB4_37: # %.preheader.i104 # =>This Loop Header: Depth=1 # Child Loop BB4_38 Depth 2 movl %edx, %r8d leaq (%rcx,%r8,4), %rdi leaq (%r12,%r8,4), %r8 xorl %r9d, %r9d movapd %xmm2, %xmm1 jmp .LBB4_38 .p2align 4, 0x90 .LBB4_40: # in Loop: Header=BB4_38 Depth=2 maxsd %xmm1, %xmm2 incq %r9 movapd %xmm2, %xmm1 cmpq %r9, %rax je .LBB4_41 .LBB4_38: # Parent Loop BB4_37 Depth=1 # => This Inner Loop Header: Depth=2 movss (%r8,%r9,4), %xmm2 # xmm2 = mem[0],zero,zero,zero movaps %xmm2, %xmm3 subss (%rdi,%r9,4), %xmm3 divss 
%xmm2, %xmm3 xorps %xmm2, %xmm2 cvtss2sd %xmm3, %xmm2 ucomiss %xmm3, %xmm0 jbe .LBB4_40 # %bb.39: # in Loop: Header=BB4_38 Depth=2 xorps %xmm4, %xmm2 jmp .LBB4_40 .LBB4_42: # %_Z8maxerroriiPfS_.exit117.critedge movl $.L.str.5, %edi xorpd %xmm2, %xmm2 movb $3, %al callq printf xorpd %xmm2, %xmm2 jmp .LBB4_43 .LBB4_35: xorpd %xmm2, %xmm2 movl %r13d, %r15d movq 8(%rsp), %r13 # 8-byte Reload .LBB4_43: # %_Z8maxerroriiPfS_.exit117 movzwl 24(%rsp), %eax # 2-byte Folded Reload xorps %xmm1, %xmm1 cvtsi2sd %eax, %xmm1 movsd .LCPI4_0(%rip), %xmm4 # xmm4 = mem[0],zero divsd %xmm4, %xmm1 xorps %xmm0, %xmm0 cvtsi2sdq 16(%rsp), %xmm0 # 8-byte Folded Reload addsd %xmm1, %xmm0 movzwl %r15w, %eax xorps %xmm1, %xmm1 cvtsi2sd %eax, %xmm1 divsd %xmm4, %xmm1 xorps %xmm3, %xmm3 cvtsi2sd %r13, %xmm3 addsd %xmm1, %xmm3 subsd %xmm3, %xmm0 movsd .LCPI4_1(%rip), %xmm3 # xmm3 = mem[0],zero mulsd %xmm0, %xmm3 movsd 64(%rsp), %xmm1 # 8-byte Reload # xmm1 = mem[0],zero divsd %xmm3, %xmm1 mulsd %xmm4, %xmm0 movl $.L.str.6, %edi movb $3, %al callq printf movq %rbx, %rdi callq free xorl %eax, %eax addq $72, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .LBB4_44: .cfi_def_cfa_offset 128 movq stderr(%rip), %rdi movl $.L.str, %esi movl $5, %edx xorl %eax, %eax callq fprintf movl $1, %edi callq exit .Lfunc_end4: .size main, .Lfunc_end4-main .cfi_endproc # -- End function .globl _Z11matmul_baseiPfS_S_ # -- Begin function _Z11matmul_baseiPfS_S_ .p2align 4, 0x90 .type _Z11matmul_baseiPfS_S_,@function _Z11matmul_baseiPfS_S_: # @_Z11matmul_baseiPfS_S_ .cfi_startproc # %bb.0: testl %edi, %edi jle .LBB5_8 # %bb.1: # %.preheader26.lr.ph pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r13 .cfi_def_cfa_offset 32 pushq %r12 .cfi_def_cfa_offset 40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset %rbx, -48 
.cfi_offset %r12, -40 .cfi_offset %r13, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movl %edi, %eax leaq (,%rax,4), %r8 xorl %r9d, %r9d xorl %r10d, %r10d .p2align 4, 0x90 .LBB5_2: # %.preheader26 # =>This Loop Header: Depth=1 # Child Loop BB5_3 Depth 2 # Child Loop BB5_4 Depth 3 movl %r9d, %r11d leaq (%rsi,%r11,4), %r11 movq %r10, %rbx imulq %rax, %rbx leaq (%rcx,%rbx,4), %rbx movq %rdx, %r14 xorl %r15d, %r15d .p2align 4, 0x90 .LBB5_3: # %.preheader # Parent Loop BB5_2 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB5_4 Depth 3 xorps %xmm0, %xmm0 movq %r14, %r12 xorl %r13d, %r13d .p2align 4, 0x90 .LBB5_4: # Parent Loop BB5_2 Depth=1 # Parent Loop BB5_3 Depth=2 # => This Inner Loop Header: Depth=3 movss (%r11,%r13,4), %xmm1 # xmm1 = mem[0],zero,zero,zero mulss (%r12), %xmm1 addss %xmm1, %xmm0 incq %r13 addq %r8, %r12 cmpq %r13, %rax jne .LBB5_4 # %bb.5: # %._crit_edge # in Loop: Header=BB5_3 Depth=2 movss %xmm0, (%rbx,%r15,4) incq %r15 addq $4, %r14 cmpq %rax, %r15 jne .LBB5_3 # %bb.6: # %._crit_edge30 # in Loop: Header=BB5_2 Depth=1 incq %r10 addl %edi, %r9d cmpq %rax, %r10 jne .LBB5_2 # %bb.7: popq %rbx .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 .cfi_restore %rbx .cfi_restore %r12 .cfi_restore %r13 .cfi_restore %r14 .cfi_restore %r15 .LBB5_8: # %._crit_edge32 retq .Lfunc_end5: .size _Z11matmul_baseiPfS_S_, .Lfunc_end5-_Z11matmul_baseiPfS_S_ .cfi_endproc # -- End function .globl _Z13matmul_openmpiPfS_S_i # -- Begin function _Z13matmul_openmpiPfS_S_i .p2align 4, 0x90 .type _Z13matmul_openmpiPfS_S_i,@function _Z13matmul_openmpiPfS_S_i: # @_Z13matmul_openmpiPfS_S_i .cfi_startproc # %bb.0: testl %edi, %edi jle .LBB6_8 # %bb.1: # %.preheader26.lr.ph pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r13 .cfi_def_cfa_offset 32 pushq %r12 .cfi_def_cfa_offset 40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset %rbx, -48 .cfi_offset 
%r12, -40 .cfi_offset %r13, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movl %edi, %eax leaq (,%rax,4), %r8 xorl %r9d, %r9d xorl %r10d, %r10d .p2align 4, 0x90 .LBB6_2: # %.preheader26 # =>This Loop Header: Depth=1 # Child Loop BB6_3 Depth 2 # Child Loop BB6_4 Depth 3 movl %r9d, %r11d leaq (%rsi,%r11,4), %r11 movq %r10, %rbx imulq %rax, %rbx leaq (%rcx,%rbx,4), %rbx movq %rdx, %r14 xorl %r15d, %r15d .p2align 4, 0x90 .LBB6_3: # %.preheader # Parent Loop BB6_2 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB6_4 Depth 3 xorps %xmm0, %xmm0 movq %r14, %r12 xorl %r13d, %r13d .p2align 4, 0x90 .LBB6_4: # Parent Loop BB6_2 Depth=1 # Parent Loop BB6_3 Depth=2 # => This Inner Loop Header: Depth=3 movss (%r11,%r13,4), %xmm1 # xmm1 = mem[0],zero,zero,zero mulss (%r12), %xmm1 addss %xmm1, %xmm0 incq %r13 addq %r8, %r12 cmpq %r13, %rax jne .LBB6_4 # %bb.5: # %._crit_edge # in Loop: Header=BB6_3 Depth=2 movss %xmm0, (%rbx,%r15,4) incq %r15 addq $4, %r14 cmpq %rax, %r15 jne .LBB6_3 # %bb.6: # %._crit_edge30 # in Loop: Header=BB6_2 Depth=1 incq %r10 addl %edi, %r9d cmpq %rax, %r10 jne .LBB6_2 # %bb.7: popq %rbx .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 .cfi_restore %rbx .cfi_restore %r12 .cfi_restore %r13 .cfi_restore %r14 .cfi_restore %r15 .LBB6_8: # %._crit_edge32 retq .Lfunc_end6: .size _Z13matmul_openmpiPfS_S_i, .Lfunc_end6-_Z13matmul_openmpiPfS_S_i .cfi_endproc # -- End function .globl _Z22matmul_cuda_v1_vanillaiPfS_S_ # -- Begin function _Z22matmul_cuda_v1_vanillaiPfS_S_ .p2align 4, 0x90 .type _Z22matmul_cuda_v1_vanillaiPfS_S_,@function _Z22matmul_cuda_v1_vanillaiPfS_S_: # @_Z22matmul_cuda_v1_vanillaiPfS_S_ .cfi_startproc # %bb.0: retq .Lfunc_end7: .size _Z22matmul_cuda_v1_vanillaiPfS_S_, .Lfunc_end7-_Z22matmul_cuda_v1_vanillaiPfS_S_ .cfi_endproc # -- End function .globl _Z20matmul_cuda_v1_shmemiPfS_S_ # -- Begin function _Z20matmul_cuda_v1_shmemiPfS_S_ 
.p2align 4, 0x90 .type _Z20matmul_cuda_v1_shmemiPfS_S_,@function _Z20matmul_cuda_v1_shmemiPfS_S_: # @_Z20matmul_cuda_v1_shmemiPfS_S_ .cfi_startproc # %bb.0: retq .Lfunc_end8: .size _Z20matmul_cuda_v1_shmemiPfS_S_, .Lfunc_end8-_Z20matmul_cuda_v1_shmemiPfS_S_ .cfi_endproc # -- End function .globl _Z21matmul_cuda_v1_cublasiPfS_S_ # -- Begin function _Z21matmul_cuda_v1_cublasiPfS_S_ .p2align 4, 0x90 .type _Z21matmul_cuda_v1_cublasiPfS_S_,@function _Z21matmul_cuda_v1_cublasiPfS_S_: # @_Z21matmul_cuda_v1_cublasiPfS_S_ .cfi_startproc # %bb.0: retq .Lfunc_end9: .size _Z21matmul_cuda_v1_cublasiPfS_S_, .Lfunc_end9-_Z21matmul_cuda_v1_cublasiPfS_S_ .cfi_endproc # -- End function .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Usage: matmul <n> [<#tasks(%d)>]\n" .size .L.str, 34 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "Matrix Multiplication: A[M][K] * B[k][N] = C[M][N], M=K=N=%d, %d threads/tasks\n" .size .L.str.2, 80 .type .L.str.5,@object # @.str.5 .L.str.5: .asciz "matmul_base:\t\t%4f\t%4f \t\t%g\n" .size .L.str.5, 28 .type .L.str.6,@object # @.str.6 .L.str.6: .asciz "matmul_openmp:\t\t%4f\t%4f \t\t%g\n" .size .L.str.6, 30 .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "======================================================================================================" .size .Lstr, 103 .type .Lstr.2,@object # @str.2 .Lstr.2: .asciz "Performance:\t\tRuntime (ms)\t MFLOPS \t\tError (compared to base)" .size .Lstr.2, 62 .type .Lstr.3,@object # @str.3 .Lstr.3: .asciz "------------------------------------------------------------------------------------------------------" .size .Lstr.3, 103 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 
e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_001b6161_00000000-6_matmul.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2069: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2069: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z10read_timerv .type _Z10read_timerv, @function _Z10read_timerv: .LFB2057: .cfi_startproc endbr64 subq $40, %rsp .cfi_def_cfa_offset 48 movq %fs:40, %rax movq %rax, 24(%rsp) xorl %eax, %eax movq %rsp, %rdi call ftime@PLT movzwl 8(%rsp), %eax pxor %xmm0, %xmm0 cvtsi2sdl %eax, %xmm0 divsd .LC0(%rip), %xmm0 pxor %xmm1, %xmm1 cvtsi2sdq (%rsp), %xmm1 addsd %xmm1, %xmm0 movq 24(%rsp), %rax subq %fs:40, %rax jne .L6 addq $40, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L6: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size _Z10read_timerv, .-_Z10read_timerv .globl _Z13read_timer_msv .type _Z13read_timer_msv, @function _Z13read_timer_msv: .LFB2058: .cfi_startproc endbr64 subq $40, %rsp .cfi_def_cfa_offset 48 movq %fs:40, %rax movq %rax, 24(%rsp) xorl %eax, %eax movq %rsp, %rdi call ftime@PLT pxor %xmm0, %xmm0 cvtsi2sdq (%rsp), %xmm0 mulsd .LC0(%rip), %xmm0 movzwl 8(%rsp), %eax pxor %xmm1, %xmm1 cvtsi2sdl %eax, %xmm1 addsd %xmm1, %xmm0 movq 24(%rsp), %rax subq %fs:40, %rax jne .L10 addq $40, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L10: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2058: .size _Z13read_timer_msv, .-_Z13read_timer_msv .globl _Z4initiiPf .type _Z4initiiPf, @function _Z4initiiPf: .LFB2059: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 
.cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $24, %rsp .cfi_def_cfa_offset 80 movl %edi, 4(%rsp) testl %edi, %edi jle .L11 movl %esi, %r14d movq %rdx, %r15 movl $0, %r13d movl $0, %r12d movslq %esi, %rax movq %rax, 8(%rsp) jmp .L13 .L15: movslq %r13d, %rax leaq (%r15,%rax,4), %rbx movq 8(%rsp), %rcx addq %rcx, %rax leaq (%r15,%rax,4), %rbp .L14: call drand48@PLT cvtsd2ss %xmm0, %xmm0 movss %xmm0, (%rbx) addq $4, %rbx cmpq %rbp, %rbx jne .L14 .L16: addl $1, %r12d addl %r14d, %r13d cmpl %r12d, 4(%rsp) je .L11 .L13: testl %r14d, %r14d jg .L15 jmp .L16 .L11: addq $24, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2059: .size _Z4initiiPf, .-_Z4initiiPf .globl _Z8maxerroriiPfS_ .type _Z8maxerroriiPfS_, @function _Z8maxerroriiPfS_: .LFB2060: .cfi_startproc endbr64 movl %edi, %r8d movl %esi, %r9d movq %rdx, %rsi testl %edi, %edi jle .L28 movl $0, %r10d pxor %xmm2, %xmm2 movl $0, %edi movslq %r9d, %r11 pxor %xmm3, %xmm3 movq .LC3(%rip), %xmm4 jmp .L21 .L22: maxsd %xmm2, %xmm1 movapd %xmm1, %xmm2 addq $4, %rax cmpq %rdx, %rax je .L27 .L25: movss (%rsi,%rax), %xmm1 movaps %xmm1, %xmm0 subss (%rcx,%rax), %xmm0 divss %xmm1, %xmm0 pxor %xmm1, %xmm1 cvtss2sd %xmm0, %xmm1 comiss %xmm0, %xmm3 jbe .L22 xorpd %xmm4, %xmm1 jmp .L22 .L27: addl $1, %edi addl %r9d, %r10d cmpl %edi, %r8d je .L19 .L21: testl %r9d, %r9d jle .L27 movslq %r10d, %rdx leaq 0(,%rdx,4), %rax addq %r11, %rdx salq $2, %rdx jmp .L25 .L28: pxor %xmm2, %xmm2 .L19: movapd %xmm2, %xmm0 ret .cfi_endproc .LFE2060: .size _Z8maxerroriiPfS_, .-_Z8maxerroriiPfS_ .globl _Z11matmul_baseiPfS_S_ .type _Z11matmul_baseiPfS_S_, @function _Z11matmul_baseiPfS_S_: .LFB2062: .cfi_startproc endbr64 testl %edi, %edi jle .L40 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 
.cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 movl %edi, %r10d movq %rdx, %rbp movq %rcx, %r9 movslq %edi, %rbx leaq 0(,%rbx,4), %rcx movq %rsi, %r11 addq %rcx, %rsi movl $0, %r12d .L34: movq %rbp, %r8 movl $0, %edi .L37: movq %r8, %rdx movq %r11, %rax pxor %xmm1, %xmm1 .L35: movss (%rax), %xmm0 mulss (%rdx), %xmm0 addss %xmm0, %xmm1 addq $4, %rax addq %rcx, %rdx cmpq %rsi, %rax jne .L35 movss %xmm1, (%r9,%rdi,4) addq $1, %rdi addq $4, %r8 cmpq %rbx, %rdi jne .L37 addl $1, %r12d addq %rcx, %r9 addq %rcx, %r11 addq %rcx, %rsi cmpl %r12d, %r10d jne .L34 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L40: .cfi_restore 3 .cfi_restore 6 .cfi_restore 12 ret .cfi_endproc .LFE2062: .size _Z11matmul_baseiPfS_S_, .-_Z11matmul_baseiPfS_S_ .globl _Z13matmul_openmpiPfS_S_i .type _Z13matmul_openmpiPfS_S_i, @function _Z13matmul_openmpiPfS_S_i: .LFB2063: .cfi_startproc endbr64 testl %edi, %edi jle .L51 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 movl %edi, %r10d movq %rdx, %rbp movq %rcx, %r9 movslq %edi, %rbx leaq 0(,%rbx,4), %rcx movq %rsi, %r11 addq %rcx, %rsi movl $0, %r12d .L45: movq %rbp, %r8 movl $0, %edi .L48: movq %r8, %rdx movq %r11, %rax pxor %xmm1, %xmm1 .L46: movss (%rax), %xmm0 mulss (%rdx), %xmm0 addss %xmm0, %xmm1 addq $4, %rax addq %rcx, %rdx cmpq %rsi, %rax jne .L46 movss %xmm1, (%r9,%rdi,4) addq $1, %rdi addq $4, %r8 cmpq %rbx, %rdi jne .L48 addl $1, %r12d addq %rcx, %r9 addq %rcx, %r11 addq %rcx, %rsi cmpl %r12d, %r10d jne .L45 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L51: .cfi_restore 3 .cfi_restore 6 .cfi_restore 12 ret .cfi_endproc .LFE2063: .size _Z13matmul_openmpiPfS_S_i, .-_Z13matmul_openmpiPfS_S_i .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC4: .string "Usage: matmul <n> [<#tasks(%d)>]\n" 
.align 8 .LC5: .string "======================================================================================================\n" .align 8 .LC6: .string "Matrix Multiplication: A[M][K] * B[k][N] = C[M][N], M=K=N=%d, %d threads/tasks\n" .align 8 .LC7: .string "------------------------------------------------------------------------------------------------------\n" .align 8 .LC8: .string "Performance:\t\tRuntime (ms)\t MFLOPS \t\tError (compared to base)\n" .section .rodata.str1.1,"aMS",@progbits,1 .LC10: .string "matmul_base:\t\t%4f\t%4f \t\t%g\n" .LC11: .string "matmul_openmp:\t\t%4f\t%4f \t\t%g\n" .text .globl main .type main, @function main: .LFB2061: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $40, %rsp .cfi_def_cfa_offset 96 cmpl $1, %edi jle .L59 movl %edi, %ebp movq %rsi, %r13 movq 8(%rsi), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movq %rax, %r12 movl %eax, %ebx movl $5, 12(%rsp) cmpl $2, %ebp jg .L60 .L56: movslq %r12d, %r13 imulq %r13, %r13 movq %r13, %rdi salq $4, %rdi call malloc@PLT movq %rax, %rbp movl %r12d, %eax imull %r12d, %eax leaq 0(%rbp,%r13,4), %r14 leal (%rax,%rax), %edx movslq %edx, %rdx leaq 0(%rbp,%rdx,4), %r13 cltq leaq 0(%r13,%rax,4), %r15 movl $4096, %edi call srand48@PLT movq %rbp, %rdx movl %ebx, %esi movl %ebx, %edi call _Z4initiiPf movq %r14, %rdx movl %ebx, %esi movl %ebx, %edi call _Z4initiiPf call _Z10read_timerv movsd %xmm0, 16(%rsp) movq %r13, %rcx movq %r14, %rdx movq %rbp, %rsi movl %ebx, %edi call _Z11matmul_baseiPfS_S_ call _Z10read_timerv movapd %xmm0, %xmm4 subsd 16(%rsp), %xmm4 movsd %xmm4, 16(%rsp) call _Z10read_timerv movsd %xmm0, 24(%rsp) movl 12(%rsp), %r8d movq %r15, %rcx movq %r14, %rdx movq %rbp, 
%rsi movl %ebx, %edi call _Z13matmul_openmpiPfS_S_i call _Z10read_timerv movapd %xmm0, %xmm5 subsd 24(%rsp), %xmm5 movsd %xmm5, 24(%rsp) movl $0, %edi call cudaSetDevice@PLT leaq .LC5(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl 12(%rsp), %ecx movl %ebx, %edx leaq .LC6(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq .LC7(%rip), %r14 movq %r14, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq .LC8(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq %r14, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq %r13, %rcx movq %r13, %rdx movl %ebx, %esi movl %ebx, %edi call _Z8maxerroriiPfS_ movapd %xmm0, %xmm2 pxor %xmm1, %xmm1 cvtsi2sdl %r12d, %xmm1 movapd %xmm1, %xmm3 addsd %xmm1, %xmm3 mulsd %xmm1, %xmm3 mulsd %xmm1, %xmm3 movq %xmm3, %r12 movsd .LC9(%rip), %xmm1 movsd 16(%rsp), %xmm4 mulsd %xmm4, %xmm1 movsd .LC0(%rip), %xmm0 mulsd %xmm4, %xmm0 movapd %xmm3, %xmm6 divsd %xmm1, %xmm6 movapd %xmm6, %xmm1 leaq .LC10(%rip), %rsi movl $2, %edi movl $3, %eax call __printf_chk@PLT movq %r15, %rcx movq %r13, %rdx movl %ebx, %esi movl %ebx, %edi call _Z8maxerroriiPfS_ movapd %xmm0, %xmm2 movsd .LC9(%rip), %xmm1 movsd 24(%rsp), %xmm5 mulsd %xmm5, %xmm1 movsd .LC0(%rip), %xmm0 mulsd %xmm5, %xmm0 movq %r12, %xmm7 divsd %xmm1, %xmm7 movapd %xmm7, %xmm1 leaq .LC11(%rip), %rsi movl $2, %edi movl $3, %eax call __printf_chk@PLT movq %rbp, %rdi call free@PLT movl $0, %eax addq $40, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L59: .cfi_restore_state movl $5, %ecx leaq .LC4(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L60: movq 16(%r13), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movl %eax, 12(%rsp) jmp .L56 .cfi_endproc 
.LFE2061: .size main, .-main .globl _Z22matmul_cuda_v1_vanillaiPfS_S_ .type _Z22matmul_cuda_v1_vanillaiPfS_S_, @function _Z22matmul_cuda_v1_vanillaiPfS_S_: .LFB2064: .cfi_startproc endbr64 ret .cfi_endproc .LFE2064: .size _Z22matmul_cuda_v1_vanillaiPfS_S_, .-_Z22matmul_cuda_v1_vanillaiPfS_S_ .globl _Z20matmul_cuda_v1_shmemiPfS_S_ .type _Z20matmul_cuda_v1_shmemiPfS_S_, @function _Z20matmul_cuda_v1_shmemiPfS_S_: .LFB2065: .cfi_startproc endbr64 ret .cfi_endproc .LFE2065: .size _Z20matmul_cuda_v1_shmemiPfS_S_, .-_Z20matmul_cuda_v1_shmemiPfS_S_ .globl _Z21matmul_cuda_v1_cublasiPfS_S_ .type _Z21matmul_cuda_v1_cublasiPfS_S_, @function _Z21matmul_cuda_v1_cublasiPfS_S_: .LFB2066: .cfi_startproc endbr64 ret .cfi_endproc .LFE2066: .size _Z21matmul_cuda_v1_cublasiPfS_S_, .-_Z21matmul_cuda_v1_cublasiPfS_S_ .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2092: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2092: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC0: .long 0 .long 1083129856 .section .rodata.cst16,"aM",@progbits,16 .align 16 .LC3: .long 0 .long -2147483648 .long 0 .long 0 .section .rodata.cst8 .align 8 .LC9: .long 0 .long 1093567616 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section 
.note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "matmul.hip" .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function _Z10read_timerv .LCPI0_0: .quad 0x408f400000000000 # double 1000 .text .globl _Z10read_timerv .p2align 4, 0x90 .type _Z10read_timerv,@function _Z10read_timerv: # @_Z10read_timerv .cfi_startproc # %bb.0: subq $24, %rsp .cfi_def_cfa_offset 32 leaq 8(%rsp), %rdi callq ftime cvtsi2sdq 8(%rsp), %xmm1 movzwl 16(%rsp), %eax cvtsi2sd %eax, %xmm0 divsd .LCPI0_0(%rip), %xmm0 addsd %xmm1, %xmm0 addq $24, %rsp .cfi_def_cfa_offset 8 retq .Lfunc_end0: .size _Z10read_timerv, .Lfunc_end0-_Z10read_timerv .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function _Z13read_timer_msv .LCPI1_0: .quad 0x408f400000000000 # double 1000 .text .globl _Z13read_timer_msv .p2align 4, 0x90 .type _Z13read_timer_msv,@function _Z13read_timer_msv: # @_Z13read_timer_msv .cfi_startproc # %bb.0: subq $24, %rsp .cfi_def_cfa_offset 32 leaq 8(%rsp), %rdi callq ftime cvtsi2sdq 8(%rsp), %xmm1 mulsd .LCPI1_0(%rip), %xmm1 movzwl 16(%rsp), %eax cvtsi2sd %eax, %xmm0 addsd %xmm1, %xmm0 addq $24, %rsp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size _Z13read_timer_msv, .Lfunc_end1-_Z13read_timer_msv .cfi_endproc # -- End function .globl _Z4initiiPf # -- Begin function _Z4initiiPf .p2align 4, 0x90 .type _Z4initiiPf,@function _Z4initiiPf: # @_Z4initiiPf .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $24, %rsp .cfi_def_cfa_offset 80 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movq %rdx, 8(%rsp) # 8-byte Spill testl %edi, %edi jle .LBB2_6 # %bb.1: # %.preheader.lr.ph movl %esi, %ebp movl %edi, %eax movq %rax, 16(%rsp) # 8-byte Spill movl %esi, %r15d xorl %r12d, %r12d xorl %r13d, %r13d jmp .LBB2_2 .p2align 
4, 0x90 .LBB2_5: # %._crit_edge # in Loop: Header=BB2_2 Depth=1 incq %r13 addl %ebp, %r12d cmpq 16(%rsp), %r13 # 8-byte Folded Reload je .LBB2_6 .LBB2_2: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB2_4 Depth 2 testl %ebp, %ebp jle .LBB2_5 # %bb.3: # %.lr.ph # in Loop: Header=BB2_2 Depth=1 movl %r12d, %eax movq 8(%rsp), %rcx # 8-byte Reload leaq (%rcx,%rax,4), %rbx xorl %r14d, %r14d .p2align 4, 0x90 .LBB2_4: # Parent Loop BB2_2 Depth=1 # => This Inner Loop Header: Depth=2 callq drand48 cvtsd2ss %xmm0, %xmm0 movss %xmm0, (%rbx,%r14,4) incq %r14 cmpq %r14, %r15 jne .LBB2_4 jmp .LBB2_5 .LBB2_6: # %._crit_edge13 addq $24, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end2: .size _Z4initiiPf, .Lfunc_end2-_Z4initiiPf .cfi_endproc # -- End function .section .rodata.cst16,"aM",@progbits,16 .p2align 4, 0x0 # -- Begin function _Z8maxerroriiPfS_ .LCPI3_0: .quad 0x8000000000000000 # double -0 .quad 0x8000000000000000 # double -0 .text .globl _Z8maxerroriiPfS_ .p2align 4, 0x90 .type _Z8maxerroriiPfS_,@function _Z8maxerroriiPfS_: # @_Z8maxerroriiPfS_ .cfi_startproc # %bb.0: testl %edi, %edi jle .LBB3_1 # %bb.3: # %.preheader.lr.ph pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset %rbx, -16 movl %edi, %eax movl %esi, %edi xorpd %xmm3, %xmm3 xorl %r8d, %r8d xorps %xmm1, %xmm1 movaps .LCPI3_0(%rip), %xmm2 # xmm2 = [-0.0E+0,-0.0E+0] xorl %r9d, %r9d jmp .LBB3_4 .p2align 4, 0x90 .LBB3_5: # in Loop: Header=BB3_4 Depth=1 movapd %xmm3, %xmm0 .LBB3_10: # %._crit_edge # in Loop: Header=BB3_4 Depth=1 incq %r9 addl %esi, %r8d movapd %xmm0, %xmm3 cmpq %rax, %r9 je .LBB3_11 .LBB3_4: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB3_7 Depth 2 testl %esi, %esi jle .LBB3_5 # %bb.6: # %.lr.ph # in Loop: Header=BB3_4 Depth=1 movl %r8d, %r11d leaq (%rcx,%r11,4), %r10 leaq (%rdx,%r11,4), %r11 
xorl %ebx, %ebx jmp .LBB3_7 .p2align 4, 0x90 .LBB3_9: # in Loop: Header=BB3_7 Depth=2 maxsd %xmm3, %xmm0 incq %rbx movapd %xmm0, %xmm3 cmpq %rbx, %rdi je .LBB3_10 .LBB3_7: # Parent Loop BB3_4 Depth=1 # => This Inner Loop Header: Depth=2 movss (%r11,%rbx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero movaps %xmm0, %xmm4 subss (%r10,%rbx,4), %xmm4 divss %xmm0, %xmm4 xorps %xmm0, %xmm0 cvtss2sd %xmm4, %xmm0 ucomiss %xmm4, %xmm1 jbe .LBB3_9 # %bb.8: # in Loop: Header=BB3_7 Depth=2 xorps %xmm2, %xmm0 jmp .LBB3_9 .LBB3_11: popq %rbx .cfi_def_cfa_offset 8 .cfi_restore %rbx retq .LBB3_1: xorps %xmm0, %xmm0 retq .Lfunc_end3: .size _Z8maxerroriiPfS_, .Lfunc_end3-_Z8maxerroriiPfS_ .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function main .LCPI4_0: .quad 0x408f400000000000 # double 1000 .LCPI4_1: .quad 0x412e848000000000 # double 1.0E+6 .section .rodata.cst16,"aM",@progbits,16 .p2align 4, 0x0 .LCPI4_2: .quad 0x8000000000000000 # double -0 .quad 0x8000000000000000 # double -0 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $72, %rsp .cfi_def_cfa_offset 128 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 cmpl $1, %edi jle .LBB4_44 # %bb.1: movq %rsi, %rbx movl %edi, %r12d movq 8(%rsi), %rdi xorl %esi, %esi movl $10, %edx callq __isoc23_strtol movq %rax, %r14 movq %rax, (%rsp) # 8-byte Spill movl %r14d, %ebp movl $5, %eax cmpl $2, %r12d je .LBB4_3 # %bb.2: movq 16(%rbx), %rdi xorl %esi, %esi movl $10, %edx callq __isoc23_strtol .LBB4_3: movq %rax, 64(%rsp) # 8-byte Spill movslq (%rsp), %rdi # 4-byte Folded Reload shlq $2, %rdi imulq %rdi, %rdi callq malloc movq %rax, 56(%rsp) # 8-byte Spill imulq 
%r14, %r14 movq %r14, 24(%rsp) # 8-byte Spill movl %r14d, %eax movq %rax, 16(%rsp) # 8-byte Spill movl %ebp, %eax imull %ebp, %eax addl %eax, %eax cltq movq %rax, 8(%rsp) # 8-byte Spill movl $4096, %edi # imm = 0x1000 callq srand48 testl %ebp, %ebp jle .LBB4_13 # %bb.4: # %.preheader.lr.ph.i movl (%rsp), %r13d # 4-byte Reload xorl %r12d, %r12d xorl %r14d, %r14d .p2align 4, 0x90 .LBB4_5: # %.preheader.i # =>This Loop Header: Depth=1 # Child Loop BB4_6 Depth 2 movl %r12d, %eax movq 56(%rsp), %rcx # 8-byte Reload leaq (%rcx,%rax,4), %r15 xorl %ebx, %ebx .p2align 4, 0x90 .LBB4_6: # Parent Loop BB4_5 Depth=1 # => This Inner Loop Header: Depth=2 callq drand48 cvtsd2ss %xmm0, %xmm0 movss %xmm0, (%r15,%rbx,4) incq %rbx cmpq %rbx, %r13 jne .LBB4_6 # %bb.7: # %._crit_edge.i # in Loop: Header=BB4_5 Depth=1 incq %r14 addl %ebp, %r12d cmpq %r13, %r14 jne .LBB4_5 # %bb.8: # %_Z4initiiPf.exit testl %ebp, %ebp jle .LBB4_13 # %bb.9: # %.preheader.lr.ph.i58 movl (%rsp), %r13d # 4-byte Reload movq 56(%rsp), %rax # 8-byte Reload movq 16(%rsp), %rcx # 8-byte Reload leaq (%rax,%rcx,4), %rax movq %rax, 48(%rsp) # 8-byte Spill xorl %r14d, %r14d xorl %r15d, %r15d .p2align 4, 0x90 .LBB4_10: # %.preheader.i60 # =>This Loop Header: Depth=1 # Child Loop BB4_11 Depth 2 movl %r14d, %eax movq 48(%rsp), %rcx # 8-byte Reload leaq (%rcx,%rax,4), %rbx xorl %r12d, %r12d .p2align 4, 0x90 .LBB4_11: # Parent Loop BB4_10 Depth=1 # => This Inner Loop Header: Depth=2 callq drand48 cvtsd2ss %xmm0, %xmm0 movss %xmm0, (%rbx,%r12,4) incq %r12 cmpq %r12, %r13 jne .LBB4_11 # %bb.12: # %._crit_edge.i62 # in Loop: Header=BB4_10 Depth=1 incq %r15 addl %ebp, %r14d cmpq %r13, %r15 jne .LBB4_10 .LBB4_13: # %_Z4initiiPf.exit69 movq 24(%rsp), %r13 # 8-byte Reload shlq $32, %r13 movq 56(%rsp), %rbx # 8-byte Reload movq 16(%rsp), %rax # 8-byte Reload leaq (%rbx,%rax,4), %r14 movq 8(%rsp), %rax # 8-byte Reload leaq (%rbx,%rax,4), %r12 leaq 32(%rsp), %rdi callq ftime xorps %xmm0, %xmm0 cvtsi2sdq 32(%rsp), %xmm0 movzwl 
40(%rsp), %eax cvtsi2sd %eax, %xmm1 divsd .LCPI4_0(%rip), %xmm1 addsd %xmm0, %xmm1 movsd %xmm1, 16(%rsp) # 8-byte Spill testl %ebp, %ebp jle .LBB4_20 # %bb.14: # %.preheader26.lr.ph.i movl (%rsp), %eax # 4-byte Reload leaq (,%rax,4), %rcx xorl %edx, %edx xorl %esi, %esi .p2align 4, 0x90 .LBB4_15: # %.preheader26.i # =>This Loop Header: Depth=1 # Child Loop BB4_16 Depth 2 # Child Loop BB4_17 Depth 3 movl %edx, %edi leaq (%rbx,%rdi,4), %rdi movq %rsi, %r8 imulq %rax, %r8 leaq (%r12,%r8,4), %r8 movq %r14, %r9 xorl %r10d, %r10d .p2align 4, 0x90 .LBB4_16: # %.preheader.i70 # Parent Loop BB4_15 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB4_17 Depth 3 xorpd %xmm0, %xmm0 movq %r9, %r11 xorl %r15d, %r15d .p2align 4, 0x90 .LBB4_17: # Parent Loop BB4_15 Depth=1 # Parent Loop BB4_16 Depth=2 # => This Inner Loop Header: Depth=3 movss (%rdi,%r15), %xmm1 # xmm1 = mem[0],zero,zero,zero mulss (%r11), %xmm1 addss %xmm1, %xmm0 addq $4, %r15 addq %rcx, %r11 cmpq %r15, %rcx jne .LBB4_17 # %bb.18: # %._crit_edge.i75 # in Loop: Header=BB4_16 Depth=2 movss %xmm0, (%r8,%r10,4) incq %r10 addq $4, %r9 cmpq %rax, %r10 jne .LBB4_16 # %bb.19: # %._crit_edge30.i # in Loop: Header=BB4_15 Depth=1 incq %rsi addl %ebp, %edx cmpq %rax, %rsi jne .LBB4_15 .LBB4_20: # %_Z11matmul_baseiPfS_S_.exit leaq (,%r13,2), %rax addq %r13, %rax movq %rax, 48(%rsp) # 8-byte Spill leaq 32(%rsp), %rdi callq ftime xorps %xmm0, %xmm0 cvtsi2sdq 32(%rsp), %xmm0 movsd %xmm0, 24(%rsp) # 8-byte Spill movzwl 40(%rsp), %ebx leaq 32(%rsp), %rdi callq ftime xorps %xmm0, %xmm0 cvtsi2sd %ebx, %xmm0 movq 56(%rsp), %rbx # 8-byte Reload divsd .LCPI4_0(%rip), %xmm0 addsd 24(%rsp), %xmm0 # 8-byte Folded Reload subsd 16(%rsp), %xmm0 # 8-byte Folded Reload movsd %xmm0, 8(%rsp) # 8-byte Spill testl %ebp, %ebp jle .LBB4_27 # %bb.21: # %.preheader26.lr.ph.i76 movq 48(%rsp), %rax # 8-byte Reload sarq $30, %rax addq %rbx, %rax movl (%rsp), %ecx # 4-byte Reload leaq (,%rcx,4), %rdx xorl %esi, %esi xorl %edi, %edi .p2align 4, 0x90 
.LBB4_22: # %.preheader26.i78 # =>This Loop Header: Depth=1 # Child Loop BB4_23 Depth 2 # Child Loop BB4_24 Depth 3 movl %esi, %r8d leaq (%rbx,%r8,4), %r8 movq %rdi, %r9 imulq %rcx, %r9 leaq (%rax,%r9,4), %r9 movq %r14, %r10 xorl %r11d, %r11d .p2align 4, 0x90 .LBB4_23: # %.preheader.i81 # Parent Loop BB4_22 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB4_24 Depth 3 xorpd %xmm0, %xmm0 movq %r10, %r13 xorl %r15d, %r15d .p2align 4, 0x90 .LBB4_24: # Parent Loop BB4_22 Depth=1 # Parent Loop BB4_23 Depth=2 # => This Inner Loop Header: Depth=3 movss (%r8,%r15), %xmm1 # xmm1 = mem[0],zero,zero,zero mulss (%r13), %xmm1 addss %xmm1, %xmm0 addq $4, %r15 addq %rdx, %r13 cmpq %r15, %rdx jne .LBB4_24 # %bb.25: # %._crit_edge.i88 # in Loop: Header=BB4_23 Depth=2 movss %xmm0, (%r9,%r11,4) incq %r11 addq $4, %r10 cmpq %rcx, %r11 jne .LBB4_23 # %bb.26: # %._crit_edge30.i91 # in Loop: Header=BB4_22 Depth=1 incq %rdi addl %ebp, %esi cmpq %rcx, %rdi jne .LBB4_22 .LBB4_27: # %_Z13matmul_openmpiPfS_S_i.exit movq 32(%rsp), %r13 movzwl 40(%rsp), %r15d leaq 32(%rsp), %rdi callq ftime movq 32(%rsp), %rax movq %rax, 16(%rsp) # 8-byte Spill movzwl 40(%rsp), %eax movw %ax, 24(%rsp) # 2-byte Spill xorl %r14d, %r14d xorl %edi, %edi callq hipSetDevice movl $.Lstr, %edi callq puts@PLT movl $.L.str.2, %edi movl %ebp, %esi movq 64(%rsp), %rdx # 8-byte Reload # kill: def $edx killed $edx killed $rdx xorl %eax, %eax callq printf movl $.Lstr.3, %edi callq puts@PLT movl $.Lstr.2, %edi callq puts@PLT movl $.Lstr.3, %edi callq puts@PLT movsd .LCPI4_0(%rip), %xmm0 # xmm0 = mem[0],zero movsd 8(%rsp), %xmm3 # 8-byte Reload # xmm3 = mem[0],zero mulsd %xmm3, %xmm0 xorps %xmm1, %xmm1 cvtsi2sd %ebp, %xmm1 movapd %xmm1, %xmm2 addsd %xmm1, %xmm2 mulsd %xmm1, %xmm2 mulsd .LCPI4_1(%rip), %xmm3 mulsd %xmm1, %xmm2 movsd %xmm2, 64(%rsp) # 8-byte Spill movapd %xmm2, %xmm1 divsd %xmm3, %xmm1 testl %ebp, %ebp jle .LBB4_42 # %bb.28: # %.preheader.lr.ph.i94 movq %r13, 8(%rsp) # 8-byte Spill movl %r15d, %r13d movq 
48(%rsp), %r15 # 8-byte Reload sarq $32, %r15 movl (%rsp), %eax # 4-byte Reload xorpd %xmm2, %xmm2 xorpd %xmm3, %xmm3 xorl %ecx, %ecx jmp .LBB4_29 .p2align 4, 0x90 .LBB4_33: # %._crit_edge.i96 # in Loop: Header=BB4_29 Depth=1 incq %rcx addl %ebp, %r14d cmpq %rax, %rcx je .LBB4_34 .LBB4_29: # %.preheader.i95 # =>This Loop Header: Depth=1 # Child Loop BB4_30 Depth 2 movl %r14d, %edx leaq (%r12,%rdx,4), %rdx xorl %esi, %esi movapd %xmm2, %xmm4 jmp .LBB4_30 .p2align 4, 0x90 .LBB4_32: # in Loop: Header=BB4_30 Depth=2 maxsd %xmm4, %xmm2 incq %rsi movapd %xmm2, %xmm4 cmpq %rsi, %rax je .LBB4_33 .LBB4_30: # Parent Loop BB4_29 Depth=1 # => This Inner Loop Header: Depth=2 movss (%rdx,%rsi,4), %xmm2 # xmm2 = mem[0],zero,zero,zero movaps %xmm2, %xmm5 subss %xmm2, %xmm5 divss %xmm2, %xmm5 xorps %xmm2, %xmm2 cvtss2sd %xmm5, %xmm2 ucomiss %xmm5, %xmm3 jbe .LBB4_32 # %bb.31: # in Loop: Header=BB4_30 Depth=2 xorps .LCPI4_2(%rip), %xmm2 jmp .LBB4_32 .LBB4_34: # %_Z8maxerroriiPfS_.exit movl $.L.str.5, %edi movb $3, %al callq printf testl %ebp, %ebp jle .LBB4_35 # %bb.36: # %.preheader.lr.ph.i102 movl (%rsp), %eax # 4-byte Reload leaq (%rbx,%r15,4), %rcx xorpd %xmm2, %xmm2 xorl %edx, %edx xorpd %xmm0, %xmm0 xorl %esi, %esi movl %r13d, %r15d movq 8(%rsp), %r13 # 8-byte Reload movaps .LCPI4_2(%rip), %xmm4 # xmm4 = [-0.0E+0,-0.0E+0] jmp .LBB4_37 .p2align 4, 0x90 .LBB4_41: # %._crit_edge.i107 # in Loop: Header=BB4_37 Depth=1 incq %rsi addl %ebp, %edx cmpq %rax, %rsi je .LBB4_43 .LBB4_37: # %.preheader.i104 # =>This Loop Header: Depth=1 # Child Loop BB4_38 Depth 2 movl %edx, %r8d leaq (%rcx,%r8,4), %rdi leaq (%r12,%r8,4), %r8 xorl %r9d, %r9d movapd %xmm2, %xmm1 jmp .LBB4_38 .p2align 4, 0x90 .LBB4_40: # in Loop: Header=BB4_38 Depth=2 maxsd %xmm1, %xmm2 incq %r9 movapd %xmm2, %xmm1 cmpq %r9, %rax je .LBB4_41 .LBB4_38: # Parent Loop BB4_37 Depth=1 # => This Inner Loop Header: Depth=2 movss (%r8,%r9,4), %xmm2 # xmm2 = mem[0],zero,zero,zero movaps %xmm2, %xmm3 subss (%rdi,%r9,4), %xmm3 divss 
%xmm2, %xmm3 xorps %xmm2, %xmm2 cvtss2sd %xmm3, %xmm2 ucomiss %xmm3, %xmm0 jbe .LBB4_40 # %bb.39: # in Loop: Header=BB4_38 Depth=2 xorps %xmm4, %xmm2 jmp .LBB4_40 .LBB4_42: # %_Z8maxerroriiPfS_.exit117.critedge movl $.L.str.5, %edi xorpd %xmm2, %xmm2 movb $3, %al callq printf xorpd %xmm2, %xmm2 jmp .LBB4_43 .LBB4_35: xorpd %xmm2, %xmm2 movl %r13d, %r15d movq 8(%rsp), %r13 # 8-byte Reload .LBB4_43: # %_Z8maxerroriiPfS_.exit117 movzwl 24(%rsp), %eax # 2-byte Folded Reload xorps %xmm1, %xmm1 cvtsi2sd %eax, %xmm1 movsd .LCPI4_0(%rip), %xmm4 # xmm4 = mem[0],zero divsd %xmm4, %xmm1 xorps %xmm0, %xmm0 cvtsi2sdq 16(%rsp), %xmm0 # 8-byte Folded Reload addsd %xmm1, %xmm0 movzwl %r15w, %eax xorps %xmm1, %xmm1 cvtsi2sd %eax, %xmm1 divsd %xmm4, %xmm1 xorps %xmm3, %xmm3 cvtsi2sd %r13, %xmm3 addsd %xmm1, %xmm3 subsd %xmm3, %xmm0 movsd .LCPI4_1(%rip), %xmm3 # xmm3 = mem[0],zero mulsd %xmm0, %xmm3 movsd 64(%rsp), %xmm1 # 8-byte Reload # xmm1 = mem[0],zero divsd %xmm3, %xmm1 mulsd %xmm4, %xmm0 movl $.L.str.6, %edi movb $3, %al callq printf movq %rbx, %rdi callq free xorl %eax, %eax addq $72, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .LBB4_44: .cfi_def_cfa_offset 128 movq stderr(%rip), %rdi movl $.L.str, %esi movl $5, %edx xorl %eax, %eax callq fprintf movl $1, %edi callq exit .Lfunc_end4: .size main, .Lfunc_end4-main .cfi_endproc # -- End function .globl _Z11matmul_baseiPfS_S_ # -- Begin function _Z11matmul_baseiPfS_S_ .p2align 4, 0x90 .type _Z11matmul_baseiPfS_S_,@function _Z11matmul_baseiPfS_S_: # @_Z11matmul_baseiPfS_S_ .cfi_startproc # %bb.0: testl %edi, %edi jle .LBB5_8 # %bb.1: # %.preheader26.lr.ph pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r13 .cfi_def_cfa_offset 32 pushq %r12 .cfi_def_cfa_offset 40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset %rbx, -48 
.cfi_offset %r12, -40 .cfi_offset %r13, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movl %edi, %eax leaq (,%rax,4), %r8 xorl %r9d, %r9d xorl %r10d, %r10d .p2align 4, 0x90 .LBB5_2: # %.preheader26 # =>This Loop Header: Depth=1 # Child Loop BB5_3 Depth 2 # Child Loop BB5_4 Depth 3 movl %r9d, %r11d leaq (%rsi,%r11,4), %r11 movq %r10, %rbx imulq %rax, %rbx leaq (%rcx,%rbx,4), %rbx movq %rdx, %r14 xorl %r15d, %r15d .p2align 4, 0x90 .LBB5_3: # %.preheader # Parent Loop BB5_2 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB5_4 Depth 3 xorps %xmm0, %xmm0 movq %r14, %r12 xorl %r13d, %r13d .p2align 4, 0x90 .LBB5_4: # Parent Loop BB5_2 Depth=1 # Parent Loop BB5_3 Depth=2 # => This Inner Loop Header: Depth=3 movss (%r11,%r13,4), %xmm1 # xmm1 = mem[0],zero,zero,zero mulss (%r12), %xmm1 addss %xmm1, %xmm0 incq %r13 addq %r8, %r12 cmpq %r13, %rax jne .LBB5_4 # %bb.5: # %._crit_edge # in Loop: Header=BB5_3 Depth=2 movss %xmm0, (%rbx,%r15,4) incq %r15 addq $4, %r14 cmpq %rax, %r15 jne .LBB5_3 # %bb.6: # %._crit_edge30 # in Loop: Header=BB5_2 Depth=1 incq %r10 addl %edi, %r9d cmpq %rax, %r10 jne .LBB5_2 # %bb.7: popq %rbx .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 .cfi_restore %rbx .cfi_restore %r12 .cfi_restore %r13 .cfi_restore %r14 .cfi_restore %r15 .LBB5_8: # %._crit_edge32 retq .Lfunc_end5: .size _Z11matmul_baseiPfS_S_, .Lfunc_end5-_Z11matmul_baseiPfS_S_ .cfi_endproc # -- End function .globl _Z13matmul_openmpiPfS_S_i # -- Begin function _Z13matmul_openmpiPfS_S_i .p2align 4, 0x90 .type _Z13matmul_openmpiPfS_S_i,@function _Z13matmul_openmpiPfS_S_i: # @_Z13matmul_openmpiPfS_S_i .cfi_startproc # %bb.0: testl %edi, %edi jle .LBB6_8 # %bb.1: # %.preheader26.lr.ph pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r13 .cfi_def_cfa_offset 32 pushq %r12 .cfi_def_cfa_offset 40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset %rbx, -48 .cfi_offset 
%r12, -40 .cfi_offset %r13, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movl %edi, %eax leaq (,%rax,4), %r8 xorl %r9d, %r9d xorl %r10d, %r10d .p2align 4, 0x90 .LBB6_2: # %.preheader26 # =>This Loop Header: Depth=1 # Child Loop BB6_3 Depth 2 # Child Loop BB6_4 Depth 3 movl %r9d, %r11d leaq (%rsi,%r11,4), %r11 movq %r10, %rbx imulq %rax, %rbx leaq (%rcx,%rbx,4), %rbx movq %rdx, %r14 xorl %r15d, %r15d .p2align 4, 0x90 .LBB6_3: # %.preheader # Parent Loop BB6_2 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB6_4 Depth 3 xorps %xmm0, %xmm0 movq %r14, %r12 xorl %r13d, %r13d .p2align 4, 0x90 .LBB6_4: # Parent Loop BB6_2 Depth=1 # Parent Loop BB6_3 Depth=2 # => This Inner Loop Header: Depth=3 movss (%r11,%r13,4), %xmm1 # xmm1 = mem[0],zero,zero,zero mulss (%r12), %xmm1 addss %xmm1, %xmm0 incq %r13 addq %r8, %r12 cmpq %r13, %rax jne .LBB6_4 # %bb.5: # %._crit_edge # in Loop: Header=BB6_3 Depth=2 movss %xmm0, (%rbx,%r15,4) incq %r15 addq $4, %r14 cmpq %rax, %r15 jne .LBB6_3 # %bb.6: # %._crit_edge30 # in Loop: Header=BB6_2 Depth=1 incq %r10 addl %edi, %r9d cmpq %rax, %r10 jne .LBB6_2 # %bb.7: popq %rbx .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 .cfi_restore %rbx .cfi_restore %r12 .cfi_restore %r13 .cfi_restore %r14 .cfi_restore %r15 .LBB6_8: # %._crit_edge32 retq .Lfunc_end6: .size _Z13matmul_openmpiPfS_S_i, .Lfunc_end6-_Z13matmul_openmpiPfS_S_i .cfi_endproc # -- End function .globl _Z22matmul_cuda_v1_vanillaiPfS_S_ # -- Begin function _Z22matmul_cuda_v1_vanillaiPfS_S_ .p2align 4, 0x90 .type _Z22matmul_cuda_v1_vanillaiPfS_S_,@function _Z22matmul_cuda_v1_vanillaiPfS_S_: # @_Z22matmul_cuda_v1_vanillaiPfS_S_ .cfi_startproc # %bb.0: retq .Lfunc_end7: .size _Z22matmul_cuda_v1_vanillaiPfS_S_, .Lfunc_end7-_Z22matmul_cuda_v1_vanillaiPfS_S_ .cfi_endproc # -- End function .globl _Z20matmul_cuda_v1_shmemiPfS_S_ # -- Begin function _Z20matmul_cuda_v1_shmemiPfS_S_ 
.p2align 4, 0x90 .type _Z20matmul_cuda_v1_shmemiPfS_S_,@function _Z20matmul_cuda_v1_shmemiPfS_S_: # @_Z20matmul_cuda_v1_shmemiPfS_S_ .cfi_startproc # %bb.0: retq .Lfunc_end8: .size _Z20matmul_cuda_v1_shmemiPfS_S_, .Lfunc_end8-_Z20matmul_cuda_v1_shmemiPfS_S_ .cfi_endproc # -- End function .globl _Z21matmul_cuda_v1_cublasiPfS_S_ # -- Begin function _Z21matmul_cuda_v1_cublasiPfS_S_ .p2align 4, 0x90 .type _Z21matmul_cuda_v1_cublasiPfS_S_,@function _Z21matmul_cuda_v1_cublasiPfS_S_: # @_Z21matmul_cuda_v1_cublasiPfS_S_ .cfi_startproc # %bb.0: retq .Lfunc_end9: .size _Z21matmul_cuda_v1_cublasiPfS_S_, .Lfunc_end9-_Z21matmul_cuda_v1_cublasiPfS_S_ .cfi_endproc # -- End function .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Usage: matmul <n> [<#tasks(%d)>]\n" .size .L.str, 34 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "Matrix Multiplication: A[M][K] * B[k][N] = C[M][N], M=K=N=%d, %d threads/tasks\n" .size .L.str.2, 80 .type .L.str.5,@object # @.str.5 .L.str.5: .asciz "matmul_base:\t\t%4f\t%4f \t\t%g\n" .size .L.str.5, 28 .type .L.str.6,@object # @.str.6 .L.str.6: .asciz "matmul_openmp:\t\t%4f\t%4f \t\t%g\n" .size .L.str.6, 30 .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "======================================================================================================" .size .Lstr, 103 .type .Lstr.2,@object # @str.2 .Lstr.2: .asciz "Performance:\t\tRuntime (ms)\t MFLOPS \t\tError (compared to base)" .size .Lstr.2, 62 .type .Lstr.3,@object # @str.3 .Lstr.3: .asciz "------------------------------------------------------------------------------------------------------" .size .Lstr.3, 103 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 
e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include "includes.h"

// In-place element-wise single-precision square root of the weight array.
// Expects a 1-D launch; each thread processes exactly one element of wt.
// Threads whose global index falls beyond N (grid overshoot) do nothing.
__global__ void kernel_sqrtweights_fl(int N, float *wt){
  const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
  // Guard clause: only the first N threads touch memory.
  if (idx >= (unsigned int)N) return;
  wt[idx] = sqrtf(wt[idx]);
}
code for sm_80 Function : _Z21kernel_sqrtweights_fliPf .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */ /* 0x000e280000002500 */ /*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */ /* 0x001fca00078e0203 */ /*0040*/ ISETP.GE.U32.AND P0, PT, R2, c[0x0][0x160], PT ; /* 0x0000580002007a0c */ /* 0x000fda0003f06070 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */ /* 0x000fe200000001ff */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd20000000a00 */ /*0080*/ IMAD.WIDE.U32 R2, R2, R3, c[0x0][0x168] ; /* 0x00005a0002027625 */ /* 0x000fca00078e0003 */ /*0090*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */ /* 0x000ea2000c1e1900 */ /*00a0*/ BSSY B0, 0x180 ; /* 0x000000d000007945 */ /* 0x000fe20003800000 */ /*00b0*/ IADD3 R4, R0, -0xd000000, RZ ; /* 0xf300000000047810 */ /* 0x004fe20007ffe0ff */ /*00c0*/ MUFU.RSQ R5, R0 ; /* 0x0000000000057308 */ /* 0x0000660000001400 */ /*00d0*/ ISETP.GT.U32.AND P0, PT, R4, 0x727fffff, PT ; /* 0x727fffff0400780c */ /* 0x000fda0003f04070 */ /*00e0*/ @!P0 BRA 0x130 ; /* 0x0000004000008947 */ /* 0x000fea0003800000 */ /*00f0*/ MOV R9, 0x110 ; /* 0x0000011000097802 */ /* 0x003fe40000000f00 */ /*0100*/ CALL.REL.NOINC 0x1a0 ; /* 0x0000009000007944 */ /* 0x000fea0003c00000 */ /*0110*/ MOV R5, R0 ; /* 0x0000000000057202 */ /* 0x000fe20000000f00 */ /*0120*/ BRA 0x170 ; /* 0x0000004000007947 */ /* 0x000fea0003800000 */ /*0130*/ FMUL.FTZ R7, R0, R5 ; /* 0x0000000500077220 */ /* 0x003fe40000410000 */ /*0140*/ FMUL.FTZ R5, R5, 0.5 ; /* 0x3f00000005057820 */ /* 0x000fe40000410000 */ /*0150*/ FFMA R0, -R7, R7, R0 ; /* 0x0000000707007223 */ /* 
0x000fc80000000100 */ /*0160*/ FFMA R5, R0, R5, R7 ; /* 0x0000000500057223 */ /* 0x000fe40000000007 */ /*0170*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0180*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x000fe2000c101904 */ /*0190*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*01a0*/ LOP3.LUT P0, RZ, R0, 0x7fffffff, RZ, 0xc0, !PT ; /* 0x7fffffff00ff7812 */ /* 0x000fda000780c0ff */ /*01b0*/ @!P0 MOV R4, R0 ; /* 0x0000000000048202 */ /* 0x000fe20000000f00 */ /*01c0*/ @!P0 BRA 0x2d0 ; /* 0x0000010000008947 */ /* 0x000fea0003800000 */ /*01d0*/ FSETP.GEU.FTZ.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720b */ /* 0x000fda0003f1e000 */ /*01e0*/ @!P0 MOV R4, 0x7fffffff ; /* 0x7fffffff00048802 */ /* 0x000fe20000000f00 */ /*01f0*/ @!P0 BRA 0x2d0 ; /* 0x000000d000008947 */ /* 0x000fea0003800000 */ /*0200*/ FSETP.GTU.FTZ.AND P0, PT, |R0|, +INF , PT ; /* 0x7f8000000000780b */ /* 0x000fda0003f1c200 */ /*0210*/ @P0 FADD.FTZ R4, R0, 1 ; /* 0x3f80000000040421 */ /* 0x000fe20000010000 */ /*0220*/ @P0 BRA 0x2d0 ; /* 0x000000a000000947 */ /* 0x000fea0003800000 */ /*0230*/ FSETP.NEU.FTZ.AND P0, PT, |R0|, +INF , PT ; /* 0x7f8000000000780b */ /* 0x000fda0003f1d200 */ /*0240*/ @P0 FFMA R5, R0, 1.84467440737095516160e+19, RZ ; /* 0x5f80000000050823 */ /* 0x000fc800000000ff */ /*0250*/ @P0 MUFU.RSQ R4, R5 ; /* 0x0000000500040308 */ /* 0x000e240000001400 */ /*0260*/ @P0 FMUL.FTZ R6, R5, R4 ; /* 0x0000000405060220 */ /* 0x001fe40000410000 */ /*0270*/ @P0 FMUL.FTZ R8, R4, 0.5 ; /* 0x3f00000004080820 */ /* 0x000fe20000410000 */ /*0280*/ @!P0 MOV R4, R0 ; /* 0x0000000000048202 */ /* 0x000fe20000000f00 */ /*0290*/ @P0 FADD.FTZ R7, -R6, -RZ ; /* 0x800000ff06070221 */ /* 0x000fc80000010100 */ /*02a0*/ @P0 FFMA R7, R6, R7, R5 ; /* 0x0000000706070223 */ /* 0x000fc80000000005 */ /*02b0*/ @P0 FFMA R7, R7, R8, R6 ; /* 0x0000000807070223 */ /* 0x000fc80000000006 */ /*02c0*/ @P0 FMUL.FTZ R4, R7, 2.3283064365386962891e-10 ; /* 0x2f80000007040820 */ /* 
0x000fc80000410000 */ /*02d0*/ HFMA2.MMA R5, -RZ, RZ, 0, 0 ; /* 0x00000000ff057435 */ /* 0x000fe200000001ff */ /*02e0*/ MOV R0, R4 ; /* 0x0000000400007202 */ /* 0x000fe40000000f00 */ /*02f0*/ MOV R4, R9 ; /* 0x0000000900047202 */ /* 0x000fc80000000f00 */ /*0300*/ RET.REL.NODEC R4 0x0 ; /* 0xfffffcf004007950 */ /* 0x000fea0003c3ffff */ /*0310*/ BRA 0x310; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0320*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0330*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0340*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0350*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0360*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0370*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0380*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0390*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include "includes.h" __global__ void kernel_sqrtweights_fl(int N, float *wt){ unsigned int tid = blockIdx.x*blockDim.x + threadIdx.x; /* make sure to use only M threads */ if (tid<N) { wt[tid]=sqrtf(wt[tid]); } }
.file "tmpxft_000fab38_00000000-6_kernel_sqrtweights_fl.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z42__device_stub__Z21kernel_sqrtweights_fliPfiPf .type _Z42__device_stub__Z21kernel_sqrtweights_fliPfiPf, @function _Z42__device_stub__Z21kernel_sqrtweights_fliPfiPf: .LFB2051: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movl %edi, 12(%rsp) movq %rsi, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 12(%rsp), %rax movq %rax, 80(%rsp) movq %rsp, %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 104(%rsp), %rax subq %fs:40, %rax jne .L8 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z21kernel_sqrtweights_fliPf(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z42__device_stub__Z21kernel_sqrtweights_fliPfiPf, .-_Z42__device_stub__Z21kernel_sqrtweights_fliPfiPf .globl _Z21kernel_sqrtweights_fliPf .type _Z21kernel_sqrtweights_fliPf, @function _Z21kernel_sqrtweights_fliPf: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z42__device_stub__Z21kernel_sqrtweights_fliPfiPf addq $8, %rsp .cfi_def_cfa_offset 8 ret 
.cfi_endproc .LFE2052: .size _Z21kernel_sqrtweights_fliPf, .-_Z21kernel_sqrtweights_fliPf .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z21kernel_sqrtweights_fliPf" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z21kernel_sqrtweights_fliPf(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include "includes.h" __global__ void kernel_sqrtweights_fl(int N, float *wt){ unsigned int tid = blockIdx.x*blockDim.x + threadIdx.x; /* make sure to use only M threads */ if (tid<N) { wt[tid]=sqrtf(wt[tid]); } }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void kernel_sqrtweights_fl(int N, float *wt){ unsigned int tid = blockIdx.x*blockDim.x + threadIdx.x; /* make sure to use only M threads */ if (tid<N) { wt[tid]=sqrtf(wt[tid]); } }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include "includes.h" __global__ void kernel_sqrtweights_fl(int N, float *wt){ unsigned int tid = blockIdx.x*blockDim.x + threadIdx.x; /* make sure to use only M threads */ if (tid<N) { wt[tid]=sqrtf(wt[tid]); } }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z21kernel_sqrtweights_fliPf .globl _Z21kernel_sqrtweights_fliPf .p2align 8 .type _Z21kernel_sqrtweights_fliPf,@function _Z21kernel_sqrtweights_fliPf: s_clause 0x1 s_load_b32 s2, s[0:1], 0x1c s_load_b32 s3, s[0:1], 0x0 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_u32_e64 s3, v1 s_cbranch_execz .LBB0_2 s_load_b64 s[0:1], s[0:1], 0x8 v_mov_b32_e32 v2, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[1:2] s_waitcnt lgkmcnt(0) v_add_co_u32 v0, vcc_lo, s0, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2) v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo global_load_b32 v2, v[0:1], off s_waitcnt vmcnt(0) v_mul_f32_e32 v3, 0x4f800000, v2 v_cmp_gt_f32_e32 vcc_lo, 0xf800000, v2 v_cndmask_b32_e32 v2, v2, v3, vcc_lo s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_2) v_sqrt_f32_e32 v3, v2 s_waitcnt_depctr 0xfff v_add_nc_u32_e32 v4, -1, v3 v_add_nc_u32_e32 v5, 1, v3 v_fma_f32 v6, -v4, v3, v2 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_fma_f32 v7, -v5, v3, v2 v_cmp_ge_f32_e64 s0, 0, v6 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) v_cndmask_b32_e64 v3, v3, v4, s0 v_cmp_lt_f32_e64 s0, 0, v7 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_cndmask_b32_e64 v3, v3, v5, s0 v_mul_f32_e32 v4, 0x37800000, v3 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_cndmask_b32_e32 v3, v3, v4, vcc_lo v_cmp_class_f32_e64 vcc_lo, v2, 0x260 v_cndmask_b32_e32 v2, v3, v2, vcc_lo global_store_b32 v[0:1], v2, off .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z21kernel_sqrtweights_fliPf 
.amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 8 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z21kernel_sqrtweights_fliPf, .Lfunc_end0-_Z21kernel_sqrtweights_fliPf .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .offset: 0 .size: 4 .value_kind: by_value - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y 
- .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z21kernel_sqrtweights_fliPf .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z21kernel_sqrtweights_fliPf.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 8 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include "includes.h" __global__ void kernel_sqrtweights_fl(int N, float *wt){ unsigned int tid = blockIdx.x*blockDim.x + threadIdx.x; /* make sure to use only M threads */ if (tid<N) { wt[tid]=sqrtf(wt[tid]); } }
.text .file "kernel_sqrtweights_fl.hip" .globl _Z36__device_stub__kernel_sqrtweights_fliPf # -- Begin function _Z36__device_stub__kernel_sqrtweights_fliPf .p2align 4, 0x90 .type _Z36__device_stub__kernel_sqrtweights_fliPf,@function _Z36__device_stub__kernel_sqrtweights_fliPf: # @_Z36__device_stub__kernel_sqrtweights_fliPf .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movl %edi, 4(%rsp) movq %rsi, 56(%rsp) leaq 4(%rsp), %rax movq %rax, 64(%rsp) leaq 56(%rsp), %rax movq %rax, 72(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z21kernel_sqrtweights_fliPf, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end0: .size _Z36__device_stub__kernel_sqrtweights_fliPf, .Lfunc_end0-_Z36__device_stub__kernel_sqrtweights_fliPf .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z21kernel_sqrtweights_fliPf, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # 
%bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z21kernel_sqrtweights_fliPf,@object # @_Z21kernel_sqrtweights_fliPf .section .rodata,"a",@progbits .globl _Z21kernel_sqrtweights_fliPf .p2align 3, 0x0 _Z21kernel_sqrtweights_fliPf: .quad _Z36__device_stub__kernel_sqrtweights_fliPf .size _Z21kernel_sqrtweights_fliPf, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z21kernel_sqrtweights_fliPf" .size .L__unnamed_1, 29 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z36__device_stub__kernel_sqrtweights_fliPf .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z21kernel_sqrtweights_fliPf .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z21kernel_sqrtweights_fliPf .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */ /* 0x000e280000002500 */ /*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */ /* 0x001fca00078e0203 */ /*0040*/ ISETP.GE.U32.AND P0, PT, R2, c[0x0][0x160], PT ; /* 0x0000580002007a0c */ /* 0x000fda0003f06070 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */ /* 0x000fe200000001ff */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd20000000a00 */ /*0080*/ IMAD.WIDE.U32 R2, R2, R3, c[0x0][0x168] ; /* 0x00005a0002027625 */ /* 0x000fca00078e0003 */ /*0090*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */ /* 0x000ea2000c1e1900 */ /*00a0*/ BSSY B0, 0x180 ; /* 0x000000d000007945 */ /* 0x000fe20003800000 */ /*00b0*/ IADD3 R4, R0, -0xd000000, RZ ; /* 0xf300000000047810 */ /* 0x004fe20007ffe0ff */ /*00c0*/ MUFU.RSQ R5, R0 ; /* 0x0000000000057308 */ /* 0x0000660000001400 */ /*00d0*/ ISETP.GT.U32.AND P0, PT, R4, 0x727fffff, PT ; /* 0x727fffff0400780c */ /* 0x000fda0003f04070 */ /*00e0*/ @!P0 BRA 0x130 ; /* 0x0000004000008947 */ /* 0x000fea0003800000 */ /*00f0*/ MOV R9, 0x110 ; /* 0x0000011000097802 */ /* 0x003fe40000000f00 */ /*0100*/ CALL.REL.NOINC 0x1a0 ; /* 0x0000009000007944 */ /* 0x000fea0003c00000 */ /*0110*/ MOV R5, R0 ; /* 0x0000000000057202 */ /* 0x000fe20000000f00 */ /*0120*/ BRA 0x170 ; /* 0x0000004000007947 */ /* 0x000fea0003800000 */ /*0130*/ FMUL.FTZ R7, R0, R5 ; /* 0x0000000500077220 */ /* 0x003fe40000410000 */ /*0140*/ FMUL.FTZ R5, R5, 0.5 ; /* 0x3f00000005057820 */ /* 0x000fe40000410000 */ /*0150*/ FFMA R0, -R7, R7, R0 ; /* 0x0000000707007223 */ /* 
0x000fc80000000100 */ /*0160*/ FFMA R5, R0, R5, R7 ; /* 0x0000000500057223 */ /* 0x000fe40000000007 */ /*0170*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0180*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x000fe2000c101904 */ /*0190*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*01a0*/ LOP3.LUT P0, RZ, R0, 0x7fffffff, RZ, 0xc0, !PT ; /* 0x7fffffff00ff7812 */ /* 0x000fda000780c0ff */ /*01b0*/ @!P0 MOV R4, R0 ; /* 0x0000000000048202 */ /* 0x000fe20000000f00 */ /*01c0*/ @!P0 BRA 0x2d0 ; /* 0x0000010000008947 */ /* 0x000fea0003800000 */ /*01d0*/ FSETP.GEU.FTZ.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720b */ /* 0x000fda0003f1e000 */ /*01e0*/ @!P0 MOV R4, 0x7fffffff ; /* 0x7fffffff00048802 */ /* 0x000fe20000000f00 */ /*01f0*/ @!P0 BRA 0x2d0 ; /* 0x000000d000008947 */ /* 0x000fea0003800000 */ /*0200*/ FSETP.GTU.FTZ.AND P0, PT, |R0|, +INF , PT ; /* 0x7f8000000000780b */ /* 0x000fda0003f1c200 */ /*0210*/ @P0 FADD.FTZ R4, R0, 1 ; /* 0x3f80000000040421 */ /* 0x000fe20000010000 */ /*0220*/ @P0 BRA 0x2d0 ; /* 0x000000a000000947 */ /* 0x000fea0003800000 */ /*0230*/ FSETP.NEU.FTZ.AND P0, PT, |R0|, +INF , PT ; /* 0x7f8000000000780b */ /* 0x000fda0003f1d200 */ /*0240*/ @P0 FFMA R5, R0, 1.84467440737095516160e+19, RZ ; /* 0x5f80000000050823 */ /* 0x000fc800000000ff */ /*0250*/ @P0 MUFU.RSQ R4, R5 ; /* 0x0000000500040308 */ /* 0x000e240000001400 */ /*0260*/ @P0 FMUL.FTZ R6, R5, R4 ; /* 0x0000000405060220 */ /* 0x001fe40000410000 */ /*0270*/ @P0 FMUL.FTZ R8, R4, 0.5 ; /* 0x3f00000004080820 */ /* 0x000fe20000410000 */ /*0280*/ @!P0 MOV R4, R0 ; /* 0x0000000000048202 */ /* 0x000fe20000000f00 */ /*0290*/ @P0 FADD.FTZ R7, -R6, -RZ ; /* 0x800000ff06070221 */ /* 0x000fc80000010100 */ /*02a0*/ @P0 FFMA R7, R6, R7, R5 ; /* 0x0000000706070223 */ /* 0x000fc80000000005 */ /*02b0*/ @P0 FFMA R7, R7, R8, R6 ; /* 0x0000000807070223 */ /* 0x000fc80000000006 */ /*02c0*/ @P0 FMUL.FTZ R4, R7, 2.3283064365386962891e-10 ; /* 0x2f80000007040820 */ /* 
0x000fc80000410000 */ /*02d0*/ HFMA2.MMA R5, -RZ, RZ, 0, 0 ; /* 0x00000000ff057435 */ /* 0x000fe200000001ff */ /*02e0*/ MOV R0, R4 ; /* 0x0000000400007202 */ /* 0x000fe40000000f00 */ /*02f0*/ MOV R4, R9 ; /* 0x0000000900047202 */ /* 0x000fc80000000f00 */ /*0300*/ RET.REL.NODEC R4 0x0 ; /* 0xfffffcf004007950 */ /* 0x000fea0003c3ffff */ /*0310*/ BRA 0x310; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0320*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0330*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0340*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0350*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0360*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0370*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0380*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0390*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z21kernel_sqrtweights_fliPf .globl _Z21kernel_sqrtweights_fliPf .p2align 8 .type _Z21kernel_sqrtweights_fliPf,@function _Z21kernel_sqrtweights_fliPf: s_clause 0x1 s_load_b32 s2, s[0:1], 0x1c s_load_b32 s3, s[0:1], 0x0 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_u32_e64 s3, v1 s_cbranch_execz .LBB0_2 s_load_b64 s[0:1], s[0:1], 0x8 v_mov_b32_e32 v2, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[1:2] s_waitcnt lgkmcnt(0) v_add_co_u32 v0, vcc_lo, s0, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2) v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo global_load_b32 v2, v[0:1], off s_waitcnt vmcnt(0) v_mul_f32_e32 v3, 0x4f800000, v2 v_cmp_gt_f32_e32 vcc_lo, 0xf800000, v2 v_cndmask_b32_e32 v2, v2, v3, vcc_lo s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_2) v_sqrt_f32_e32 v3, v2 s_waitcnt_depctr 0xfff v_add_nc_u32_e32 v4, -1, v3 v_add_nc_u32_e32 v5, 1, v3 v_fma_f32 v6, -v4, v3, v2 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_fma_f32 v7, -v5, v3, v2 v_cmp_ge_f32_e64 s0, 0, v6 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) v_cndmask_b32_e64 v3, v3, v4, s0 v_cmp_lt_f32_e64 s0, 0, v7 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_cndmask_b32_e64 v3, v3, v5, s0 v_mul_f32_e32 v4, 0x37800000, v3 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_cndmask_b32_e32 v3, v3, v4, vcc_lo v_cmp_class_f32_e64 vcc_lo, v2, 0x260 v_cndmask_b32_e32 v2, v3, v2, vcc_lo global_store_b32 v[0:1], v2, off .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z21kernel_sqrtweights_fliPf 
.amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 8 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z21kernel_sqrtweights_fliPf, .Lfunc_end0-_Z21kernel_sqrtweights_fliPf .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .offset: 0 .size: 4 .value_kind: by_value - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y 
- .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z21kernel_sqrtweights_fliPf .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z21kernel_sqrtweights_fliPf.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 8 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_000fab38_00000000-6_kernel_sqrtweights_fl.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z42__device_stub__Z21kernel_sqrtweights_fliPfiPf .type _Z42__device_stub__Z21kernel_sqrtweights_fliPfiPf, @function _Z42__device_stub__Z21kernel_sqrtweights_fliPfiPf: .LFB2051: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movl %edi, 12(%rsp) movq %rsi, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 12(%rsp), %rax movq %rax, 80(%rsp) movq %rsp, %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 104(%rsp), %rax subq %fs:40, %rax jne .L8 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z21kernel_sqrtweights_fliPf(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z42__device_stub__Z21kernel_sqrtweights_fliPfiPf, .-_Z42__device_stub__Z21kernel_sqrtweights_fliPfiPf .globl _Z21kernel_sqrtweights_fliPf .type _Z21kernel_sqrtweights_fliPf, @function _Z21kernel_sqrtweights_fliPf: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z42__device_stub__Z21kernel_sqrtweights_fliPfiPf addq $8, %rsp .cfi_def_cfa_offset 8 ret 
.cfi_endproc .LFE2052: .size _Z21kernel_sqrtweights_fliPf, .-_Z21kernel_sqrtweights_fliPf .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z21kernel_sqrtweights_fliPf" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z21kernel_sqrtweights_fliPf(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "kernel_sqrtweights_fl.hip" .globl _Z36__device_stub__kernel_sqrtweights_fliPf # -- Begin function _Z36__device_stub__kernel_sqrtweights_fliPf .p2align 4, 0x90 .type _Z36__device_stub__kernel_sqrtweights_fliPf,@function _Z36__device_stub__kernel_sqrtweights_fliPf: # @_Z36__device_stub__kernel_sqrtweights_fliPf .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movl %edi, 4(%rsp) movq %rsi, 56(%rsp) leaq 4(%rsp), %rax movq %rax, 64(%rsp) leaq 56(%rsp), %rax movq %rax, 72(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z21kernel_sqrtweights_fliPf, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end0: .size _Z36__device_stub__kernel_sqrtweights_fliPf, .Lfunc_end0-_Z36__device_stub__kernel_sqrtweights_fliPf .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z21kernel_sqrtweights_fliPf, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # 
%bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z21kernel_sqrtweights_fliPf,@object # @_Z21kernel_sqrtweights_fliPf .section .rodata,"a",@progbits .globl _Z21kernel_sqrtweights_fliPf .p2align 3, 0x0 _Z21kernel_sqrtweights_fliPf: .quad _Z36__device_stub__kernel_sqrtweights_fliPf .size _Z21kernel_sqrtweights_fliPf, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z21kernel_sqrtweights_fliPf" .size .L__unnamed_1, 29 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z36__device_stub__kernel_sqrtweights_fliPf .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z21kernel_sqrtweights_fliPf .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <stdio.h> #include <sys/time.h> /* This file can be downloaded from supercomputingblog.com. This is part of a series of tutorials that demonstrate how to use CUDA The tutorials will also demonstrate the speed of using CUDA */ // IMPORTANT NOTE: for this data size, your graphics card should have at least 512 megabytes of memory. // If your GPU has less memory, then you will need to decrease this data size. #define MAX_DATA_SIZE 1024*1024*32 // about 32 million elements. // The max data size must be an integer multiple of 128*256, because each block will have 256 threads, // and the block grid width will be 128. These are arbitrary numbers I choose. void get_walltime(double* wcTime) { struct timeval tp; gettimeofday(&tp, NULL); *wcTime = (double)(tp.tv_sec + tp.tv_usec/1000000.0); } double myDiffTime(struct timeval &start, struct timeval &end) { double d_start, d_end; d_start = (double)(start.tv_sec + start.tv_usec/1000000.0); d_end = (double)(end.tv_sec + end.tv_usec/1000000.0); return (d_end - d_start); } void GoldenBrick(float *pA, float *pB, float *pResult, int count) { for (int i=0; i < count; i++) { //pResult[count] = pA[count] * pB[count]; //pResult[count] = pA[count] * pB[count] / 12.34567; //pResult[count] = sqrt(pA[count] * pB[count] / 12.34567); pResult[count] = sqrt(pA[count] * pB[count] / 12.34567) * sin(pA[count]); } } __global__ void multiplyNumbersGPU(float *pDataA, float *pDataB, float *pResult) { // Because of the simplicity of this tutorial, we are going to assume that // every block has 256 threads. Each thread simply multiplies two numbers, // and then stores the result. // The grid of blocks is 128 blocks long. int tid = (blockIdx.y * 128 * 256) + blockIdx.x * 256 + threadIdx.x; // This gives every thread a unique ID. // By no coincidence, we'll be using this thread ID to determine which data elements to multiply. //pResult[tid] = pDataA[tid] * pDataB[tid]; // Each thread only multiplies one data element. 
//pResult[tid] = pDataA[tid] * pDataB[tid] / 12.34567; //pResult[tid] = sqrt(pDataA[tid] * pDataB[tid] / 12.34567); pResult[tid] = sqrt(pDataA[tid] * pDataB[tid] / 12.34567) * sin(pDataA[tid]); } //////////////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv){ float *h_dataA, *h_dataB, *h_resultC; float *d_dataA, *d_dataB, *d_resultC; double gpuTime; int i; timeval start, end; printf("Initializing data...\n"); h_dataA = (float *)malloc(sizeof(float) * MAX_DATA_SIZE); h_dataB = (float *)malloc(sizeof(float) * MAX_DATA_SIZE); h_resultC = (float *)malloc(sizeof(float) * MAX_DATA_SIZE); cudaMalloc( (void **)&d_dataA, sizeof(float) * MAX_DATA_SIZE) ; cudaMalloc( (void **)&d_dataB, sizeof(float) * MAX_DATA_SIZE) ; cudaMalloc( (void **)&d_resultC , sizeof(float) * MAX_DATA_SIZE) ; srand(123); for(i = 0; i < MAX_DATA_SIZE; i++) { h_dataA[i] = (float)rand() / (float)RAND_MAX; h_dataB[i] = (float)rand() / (float)RAND_MAX; } int firstRun = 1; // Indicates if it's the first execution of the for loop const int useGPU = 1; // When 0, only the CPU is used. When 1, only the GPU is used for (int dataAmount = MAX_DATA_SIZE; dataAmount > 128*256; dataAmount /= 2) { int blockGridWidth = 128; int blockGridHeight = (dataAmount / 256) / blockGridWidth; dim3 blockGridRows(blockGridWidth, blockGridHeight); dim3 threadBlockRows(256, 1); // Start the timer. 
// We want to measure copying data, running the kernel, and copying the results back to host gettimeofday(&start, NULL); if (useGPU == 0) { // Copy the data to the device cudaMemcpy(d_dataA, h_dataA, sizeof(float) * dataAmount, cudaMemcpyHostToDevice) ; cudaMemcpy(d_dataB, h_dataB, sizeof(float) * dataAmount, cudaMemcpyHostToDevice) ; // Do the multiplication on the GPU multiplyNumbersGPU<<<blockGridRows, threadBlockRows>>>(d_dataA, d_dataB, d_resultC); cudaThreadSynchronize() ; // Copy the data back to the host cudaMemcpy(h_resultC, d_resultC, sizeof(float) * dataAmount, cudaMemcpyDeviceToHost) ; } else { // We're using the CPU only GoldenBrick(h_dataA, h_dataB, h_resultC, dataAmount); } // Stop the timer, print the total round trip execution time. gettimeofday(&end, NULL); gpuTime = myDiffTime(start, end); if (!firstRun || !useGPU) { printf("Elements: %d - convolution time : %f msec - %f Multiplications/sec\n", dataAmount, gpuTime, blockGridHeight * 128 * 256 / (gpuTime * 0.001)); } else { firstRun = 0; // We discard the results of the first run because of the extra overhead incurred // during the first time a kernel is ever executed. dataAmount *= 2; // reset to first run value } } printf("Cleaning up...\n"); cudaFree(d_resultC ) ; cudaFree(d_dataB) ; cudaFree(d_dataA) ; free(h_resultC); free(h_dataB); free(h_dataA); }
.file "tmpxft_0015c6dc_00000000-6_cuda_tutorial11.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2063: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2063: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z12get_walltimePd .type _Z12get_walltimePd, @function _Z12get_walltimePd: .LFB2057: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 subq $32, %rsp .cfi_def_cfa_offset 48 movq %rdi, %rbx movq %fs:40, %rax movq %rax, 24(%rsp) xorl %eax, %eax movq %rsp, %rdi movl $0, %esi call gettimeofday@PLT pxor %xmm0, %xmm0 cvtsi2sdq 8(%rsp), %xmm0 divsd .LC0(%rip), %xmm0 pxor %xmm1, %xmm1 cvtsi2sdq (%rsp), %xmm1 addsd %xmm1, %xmm0 movsd %xmm0, (%rbx) movq 24(%rsp), %rax subq %fs:40, %rax jne .L6 addq $32, %rsp .cfi_remember_state .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 ret .L6: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size _Z12get_walltimePd, .-_Z12get_walltimePd .globl _Z10myDiffTimeR7timevalS0_ .type _Z10myDiffTimeR7timevalS0_, @function _Z10myDiffTimeR7timevalS0_: .LFB2058: .cfi_startproc endbr64 pxor %xmm0, %xmm0 cvtsi2sdq 8(%rsi), %xmm0 movsd .LC0(%rip), %xmm2 divsd %xmm2, %xmm0 pxor %xmm1, %xmm1 cvtsi2sdq (%rsi), %xmm1 addsd %xmm1, %xmm0 pxor %xmm1, %xmm1 cvtsi2sdq 8(%rdi), %xmm1 divsd %xmm2, %xmm1 pxor %xmm2, %xmm2 cvtsi2sdq (%rdi), %xmm2 addsd %xmm2, %xmm1 subsd %xmm1, %xmm0 ret .cfi_endproc .LFE2058: .size _Z10myDiffTimeR7timevalS0_, .-_Z10myDiffTimeR7timevalS0_ .globl _Z11GoldenBrickPfS_S_i .type _Z11GoldenBrickPfS_S_i, @function _Z11GoldenBrickPfS_S_i: .LFB2059: .cfi_startproc endbr64 testl %ecx, %ecx jle .L18 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 
32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $24, %rsp .cfi_def_cfa_offset 80 movl %ecx, %ebp movslq %ecx, %rax salq $2, %rax leaq (%rdi,%rax), %r14 leaq (%rsi,%rax), %r13 leaq (%rdx,%rax), %r12 movl $0, %ebx .L13: movl (%r14), %r15d movd %r15d, %xmm0 mulss 0(%r13), %xmm0 cvtss2sd %xmm0, %xmm0 divsd .LC1(%rip), %xmm0 pxor %xmm1, %xmm1 ucomisd %xmm0, %xmm1 ja .L16 sqrtsd %xmm0, %xmm0 movsd %xmm0, 8(%rsp) .L12: movd %r15d, %xmm0 call sinf@PLT cvtss2sd %xmm0, %xmm0 mulsd 8(%rsp), %xmm0 cvtsd2ss %xmm0, %xmm0 movss %xmm0, (%r12) addl $1, %ebx cmpl %ebx, %ebp jne .L13 addq $24, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L16: .cfi_restore_state call sqrt@PLT movsd %xmm0, 8(%rsp) jmp .L12 .L18: .cfi_def_cfa_offset 8 .cfi_restore 3 .cfi_restore 6 .cfi_restore 12 .cfi_restore 13 .cfi_restore 14 .cfi_restore 15 ret .cfi_endproc .LFE2059: .size _Z11GoldenBrickPfS_S_i, .-_Z11GoldenBrickPfS_S_i .section .rodata.str1.1,"aMS",@progbits,1 .LC3: .string "Initializing data...\n" .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC6: .string "Elements: %d - convolution time : %f msec - %f Multiplications/sec\n" .section .rodata.str1.1 .LC7: .string "Cleaning up...\n" .text .globl main .type main, @function main: .LFB2060: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $104, %rsp .cfi_def_cfa_offset 160 movq %fs:40, %rax movq %rax, 88(%rsp) 
xorl %eax, %eax leaq .LC3(%rip), %rsi movl $2, %edi call __printf_chk@PLT movl $134217728, %edi call malloc@PLT movq %rax, %rbx movl $134217728, %edi call malloc@PLT movq %rax, %rbp movl $134217728, %edi call malloc@PLT movq %rax, %r14 leaq 24(%rsp), %rdi movl $134217728, %esi call cudaMalloc@PLT leaq 32(%rsp), %rdi movl $134217728, %esi call cudaMalloc@PLT leaq 40(%rsp), %rdi movl $134217728, %esi call cudaMalloc@PLT movl $123, %edi call srand@PLT movl $0, %r12d .L22: call rand@PLT pxor %xmm0, %xmm0 cvtsi2ssl %eax, %xmm0 mulss .LC4(%rip), %xmm0 movss %xmm0, (%rbx,%r12) call rand@PLT pxor %xmm0, %xmm0 cvtsi2ssl %eax, %xmm0 mulss .LC4(%rip), %xmm0 movss %xmm0, 0(%rbp,%r12) addq $4, %r12 cmpq $134217728, %r12 jne .L22 movl $1, %r13d movl $33554432, %r12d leaq 48(%rsp), %r15 leaq 64(%rsp), %rax movq %rax, 8(%rsp) jmp .L25 .L23: movl $0, %r13d .L25: movl $0, %esi movq %r15, %rdi call gettimeofday@PLT movl %r12d, %ecx movq %r14, %rdx movq %rbp, %rsi movq %rbx, %rdi call _Z11GoldenBrickPfS_S_i movl $0, %esi movq 8(%rsp), %rdi call gettimeofday@PLT testl %r13d, %r13d jne .L23 movq 8(%rsp), %rsi movq %r15, %rdi call _Z10myDiffTimeR7timevalS0_ leal 32767(%r12), %eax testl %r12d, %r12d cmovns %r12d, %eax andl $-32768, %eax pxor %xmm1, %xmm1 cvtsi2sdl %eax, %xmm1 movapd %xmm0, %xmm2 mulsd .LC5(%rip), %xmm2 divsd %xmm2, %xmm1 movl %r12d, %edx leaq .LC6(%rip), %rsi movl $2, %edi movl $2, %eax call __printf_chk@PLT cmpl $65537, %r12d jle .L24 movl %r12d, %eax shrl $31, %eax addl %r12d, %eax sarl %eax movl %eax, %r12d jmp .L23 .L24: leaq .LC7(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq 40(%rsp), %rdi call cudaFree@PLT movq 32(%rsp), %rdi call cudaFree@PLT movq 24(%rsp), %rdi call cudaFree@PLT movq %r14, %rdi call free@PLT movq %rbp, %rdi call free@PLT movq %rbx, %rdi call free@PLT movq 88(%rsp), %rax subq %fs:40, %rax jne .L29 movl $0, %eax addq $104, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp 
.cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L29: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2060: .size main, .-main .globl _Z42__device_stub__Z18multiplyNumbersGPUPfS_S_PfS_S_ .type _Z42__device_stub__Z18multiplyNumbersGPUPfS_S_PfS_S_, @function _Z42__device_stub__Z18multiplyNumbersGPUPfS_S_PfS_S_: .LFB2085: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L34 .L30: movq 120(%rsp), %rax subq %fs:40, %rax jne .L35 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L34: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z18multiplyNumbersGPUPfS_S_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L30 .L35: call __stack_chk_fail@PLT .cfi_endproc .LFE2085: .size _Z42__device_stub__Z18multiplyNumbersGPUPfS_S_PfS_S_, .-_Z42__device_stub__Z18multiplyNumbersGPUPfS_S_PfS_S_ .globl _Z18multiplyNumbersGPUPfS_S_ .type _Z18multiplyNumbersGPUPfS_S_, @function _Z18multiplyNumbersGPUPfS_S_: .LFB2086: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z42__device_stub__Z18multiplyNumbersGPUPfS_S_PfS_S_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2086: .size _Z18multiplyNumbersGPUPfS_S_, .-_Z18multiplyNumbersGPUPfS_S_ .section .rodata.str1.1 
.LC8: .string "_Z18multiplyNumbersGPUPfS_S_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2088: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC8(%rip), %rdx movq %rdx, %rcx leaq _Z18multiplyNumbersGPUPfS_S_(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2088: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC0: .long 0 .long 1093567616 .align 8 .LC1: .long -1467848023 .long 1076408571 .section .rodata.cst4,"aM",@progbits,4 .align 4 .LC4: .long 805306368 .section .rodata.cst8 .align 8 .LC5: .long -755914244 .long 1062232653 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <stdio.h> #include <sys/time.h> /* This file can be downloaded from supercomputingblog.com. This is part of a series of tutorials that demonstrate how to use CUDA The tutorials will also demonstrate the speed of using CUDA */ // IMPORTANT NOTE: for this data size, your graphics card should have at least 512 megabytes of memory. // If your GPU has less memory, then you will need to decrease this data size. #define MAX_DATA_SIZE 1024*1024*32 // about 32 million elements. // The max data size must be an integer multiple of 128*256, because each block will have 256 threads, // and the block grid width will be 128. These are arbitrary numbers I choose. void get_walltime(double* wcTime) { struct timeval tp; gettimeofday(&tp, NULL); *wcTime = (double)(tp.tv_sec + tp.tv_usec/1000000.0); } double myDiffTime(struct timeval &start, struct timeval &end) { double d_start, d_end; d_start = (double)(start.tv_sec + start.tv_usec/1000000.0); d_end = (double)(end.tv_sec + end.tv_usec/1000000.0); return (d_end - d_start); } void GoldenBrick(float *pA, float *pB, float *pResult, int count) { for (int i=0; i < count; i++) { //pResult[count] = pA[count] * pB[count]; //pResult[count] = pA[count] * pB[count] / 12.34567; //pResult[count] = sqrt(pA[count] * pB[count] / 12.34567); pResult[count] = sqrt(pA[count] * pB[count] / 12.34567) * sin(pA[count]); } } __global__ void multiplyNumbersGPU(float *pDataA, float *pDataB, float *pResult) { // Because of the simplicity of this tutorial, we are going to assume that // every block has 256 threads. Each thread simply multiplies two numbers, // and then stores the result. // The grid of blocks is 128 blocks long. int tid = (blockIdx.y * 128 * 256) + blockIdx.x * 256 + threadIdx.x; // This gives every thread a unique ID. // By no coincidence, we'll be using this thread ID to determine which data elements to multiply. //pResult[tid] = pDataA[tid] * pDataB[tid]; // Each thread only multiplies one data element. 
//pResult[tid] = pDataA[tid] * pDataB[tid] / 12.34567; //pResult[tid] = sqrt(pDataA[tid] * pDataB[tid] / 12.34567); pResult[tid] = sqrt(pDataA[tid] * pDataB[tid] / 12.34567) * sin(pDataA[tid]); } //////////////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv){ float *h_dataA, *h_dataB, *h_resultC; float *d_dataA, *d_dataB, *d_resultC; double gpuTime; int i; timeval start, end; printf("Initializing data...\n"); h_dataA = (float *)malloc(sizeof(float) * MAX_DATA_SIZE); h_dataB = (float *)malloc(sizeof(float) * MAX_DATA_SIZE); h_resultC = (float *)malloc(sizeof(float) * MAX_DATA_SIZE); cudaMalloc( (void **)&d_dataA, sizeof(float) * MAX_DATA_SIZE) ; cudaMalloc( (void **)&d_dataB, sizeof(float) * MAX_DATA_SIZE) ; cudaMalloc( (void **)&d_resultC , sizeof(float) * MAX_DATA_SIZE) ; srand(123); for(i = 0; i < MAX_DATA_SIZE; i++) { h_dataA[i] = (float)rand() / (float)RAND_MAX; h_dataB[i] = (float)rand() / (float)RAND_MAX; } int firstRun = 1; // Indicates if it's the first execution of the for loop const int useGPU = 1; // When 0, only the CPU is used. When 1, only the GPU is used for (int dataAmount = MAX_DATA_SIZE; dataAmount > 128*256; dataAmount /= 2) { int blockGridWidth = 128; int blockGridHeight = (dataAmount / 256) / blockGridWidth; dim3 blockGridRows(blockGridWidth, blockGridHeight); dim3 threadBlockRows(256, 1); // Start the timer. 
// We want to measure copying data, running the kernel, and copying the results back to host gettimeofday(&start, NULL); if (useGPU == 0) { // Copy the data to the device cudaMemcpy(d_dataA, h_dataA, sizeof(float) * dataAmount, cudaMemcpyHostToDevice) ; cudaMemcpy(d_dataB, h_dataB, sizeof(float) * dataAmount, cudaMemcpyHostToDevice) ; // Do the multiplication on the GPU multiplyNumbersGPU<<<blockGridRows, threadBlockRows>>>(d_dataA, d_dataB, d_resultC); cudaThreadSynchronize() ; // Copy the data back to the host cudaMemcpy(h_resultC, d_resultC, sizeof(float) * dataAmount, cudaMemcpyDeviceToHost) ; } else { // We're using the CPU only GoldenBrick(h_dataA, h_dataB, h_resultC, dataAmount); } // Stop the timer, print the total round trip execution time. gettimeofday(&end, NULL); gpuTime = myDiffTime(start, end); if (!firstRun || !useGPU) { printf("Elements: %d - convolution time : %f msec - %f Multiplications/sec\n", dataAmount, gpuTime, blockGridHeight * 128 * 256 / (gpuTime * 0.001)); } else { firstRun = 0; // We discard the results of the first run because of the extra overhead incurred // during the first time a kernel is ever executed. dataAmount *= 2; // reset to first run value } } printf("Cleaning up...\n"); cudaFree(d_resultC ) ; cudaFree(d_dataB) ; cudaFree(d_dataA) ; free(h_resultC); free(h_dataB); free(h_dataA); }
#include <hip/hip_runtime.h> #include <stdio.h> #include <sys/time.h> /* This file can be downloaded from supercomputingblog.com. This is part of a series of tutorials that demonstrate how to use CUDA The tutorials will also demonstrate the speed of using CUDA */ // IMPORTANT NOTE: for this data size, your graphics card should have at least 512 megabytes of memory. // If your GPU has less memory, then you will need to decrease this data size. #define MAX_DATA_SIZE 1024*1024*32 // about 32 million elements. // The max data size must be an integer multiple of 128*256, because each block will have 256 threads, // and the block grid width will be 128. These are arbitrary numbers I choose. void get_walltime(double* wcTime) { struct timeval tp; gettimeofday(&tp, NULL); *wcTime = (double)(tp.tv_sec + tp.tv_usec/1000000.0); } double myDiffTime(struct timeval &start, struct timeval &end) { double d_start, d_end; d_start = (double)(start.tv_sec + start.tv_usec/1000000.0); d_end = (double)(end.tv_sec + end.tv_usec/1000000.0); return (d_end - d_start); } void GoldenBrick(float *pA, float *pB, float *pResult, int count) { for (int i=0; i < count; i++) { //pResult[count] = pA[count] * pB[count]; //pResult[count] = pA[count] * pB[count] / 12.34567; //pResult[count] = sqrt(pA[count] * pB[count] / 12.34567); pResult[count] = sqrt(pA[count] * pB[count] / 12.34567) * sin(pA[count]); } } __global__ void multiplyNumbersGPU(float *pDataA, float *pDataB, float *pResult) { // Because of the simplicity of this tutorial, we are going to assume that // every block has 256 threads. Each thread simply multiplies two numbers, // and then stores the result. // The grid of blocks is 128 blocks long. int tid = (blockIdx.y * 128 * 256) + blockIdx.x * 256 + threadIdx.x; // This gives every thread a unique ID. // By no coincidence, we'll be using this thread ID to determine which data elements to multiply. //pResult[tid] = pDataA[tid] * pDataB[tid]; // Each thread only multiplies one data element. 
//pResult[tid] = pDataA[tid] * pDataB[tid] / 12.34567; //pResult[tid] = sqrt(pDataA[tid] * pDataB[tid] / 12.34567); pResult[tid] = sqrt(pDataA[tid] * pDataB[tid] / 12.34567) * sin(pDataA[tid]); } //////////////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv){ float *h_dataA, *h_dataB, *h_resultC; float *d_dataA, *d_dataB, *d_resultC; double gpuTime; int i; timeval start, end; printf("Initializing data...\n"); h_dataA = (float *)malloc(sizeof(float) * MAX_DATA_SIZE); h_dataB = (float *)malloc(sizeof(float) * MAX_DATA_SIZE); h_resultC = (float *)malloc(sizeof(float) * MAX_DATA_SIZE); hipMalloc( (void **)&d_dataA, sizeof(float) * MAX_DATA_SIZE) ; hipMalloc( (void **)&d_dataB, sizeof(float) * MAX_DATA_SIZE) ; hipMalloc( (void **)&d_resultC , sizeof(float) * MAX_DATA_SIZE) ; srand(123); for(i = 0; i < MAX_DATA_SIZE; i++) { h_dataA[i] = (float)rand() / (float)RAND_MAX; h_dataB[i] = (float)rand() / (float)RAND_MAX; } int firstRun = 1; // Indicates if it's the first execution of the for loop const int useGPU = 1; // When 0, only the CPU is used. When 1, only the GPU is used for (int dataAmount = MAX_DATA_SIZE; dataAmount > 128*256; dataAmount /= 2) { int blockGridWidth = 128; int blockGridHeight = (dataAmount / 256) / blockGridWidth; dim3 blockGridRows(blockGridWidth, blockGridHeight); dim3 threadBlockRows(256, 1); // Start the timer. 
// We want to measure copying data, running the kernel, and copying the results back to host gettimeofday(&start, NULL); if (useGPU == 0) { // Copy the data to the device hipMemcpy(d_dataA, h_dataA, sizeof(float) * dataAmount, hipMemcpyHostToDevice) ; hipMemcpy(d_dataB, h_dataB, sizeof(float) * dataAmount, hipMemcpyHostToDevice) ; // Do the multiplication on the GPU multiplyNumbersGPU<<<blockGridRows, threadBlockRows>>>(d_dataA, d_dataB, d_resultC); hipDeviceSynchronize() ; // Copy the data back to the host hipMemcpy(h_resultC, d_resultC, sizeof(float) * dataAmount, hipMemcpyDeviceToHost) ; } else { // We're using the CPU only GoldenBrick(h_dataA, h_dataB, h_resultC, dataAmount); } // Stop the timer, print the total round trip execution time. gettimeofday(&end, NULL); gpuTime = myDiffTime(start, end); if (!firstRun || !useGPU) { printf("Elements: %d - convolution time : %f msec - %f Multiplications/sec\n", dataAmount, gpuTime, blockGridHeight * 128 * 256 / (gpuTime * 0.001)); } else { firstRun = 0; // We discard the results of the first run because of the extra overhead incurred // during the first time a kernel is ever executed. dataAmount *= 2; // reset to first run value } } printf("Cleaning up...\n"); hipFree(d_resultC ) ; hipFree(d_dataB) ; hipFree(d_dataA) ; free(h_resultC); free(h_dataB); free(h_dataA); }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include <stdio.h> #include <sys/time.h> /* This file can be downloaded from supercomputingblog.com. This is part of a series of tutorials that demonstrate how to use CUDA The tutorials will also demonstrate the speed of using CUDA */ // IMPORTANT NOTE: for this data size, your graphics card should have at least 512 megabytes of memory. // If your GPU has less memory, then you will need to decrease this data size. #define MAX_DATA_SIZE 1024*1024*32 // about 32 million elements. // The max data size must be an integer multiple of 128*256, because each block will have 256 threads, // and the block grid width will be 128. These are arbitrary numbers I choose. void get_walltime(double* wcTime) { struct timeval tp; gettimeofday(&tp, NULL); *wcTime = (double)(tp.tv_sec + tp.tv_usec/1000000.0); } double myDiffTime(struct timeval &start, struct timeval &end) { double d_start, d_end; d_start = (double)(start.tv_sec + start.tv_usec/1000000.0); d_end = (double)(end.tv_sec + end.tv_usec/1000000.0); return (d_end - d_start); } void GoldenBrick(float *pA, float *pB, float *pResult, int count) { for (int i=0; i < count; i++) { //pResult[count] = pA[count] * pB[count]; //pResult[count] = pA[count] * pB[count] / 12.34567; //pResult[count] = sqrt(pA[count] * pB[count] / 12.34567); pResult[count] = sqrt(pA[count] * pB[count] / 12.34567) * sin(pA[count]); } } __global__ void multiplyNumbersGPU(float *pDataA, float *pDataB, float *pResult) { // Because of the simplicity of this tutorial, we are going to assume that // every block has 256 threads. Each thread simply multiplies two numbers, // and then stores the result. // The grid of blocks is 128 blocks long. int tid = (blockIdx.y * 128 * 256) + blockIdx.x * 256 + threadIdx.x; // This gives every thread a unique ID. // By no coincidence, we'll be using this thread ID to determine which data elements to multiply. //pResult[tid] = pDataA[tid] * pDataB[tid]; // Each thread only multiplies one data element. 
//pResult[tid] = pDataA[tid] * pDataB[tid] / 12.34567; //pResult[tid] = sqrt(pDataA[tid] * pDataB[tid] / 12.34567); pResult[tid] = sqrt(pDataA[tid] * pDataB[tid] / 12.34567) * sin(pDataA[tid]); } //////////////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv){ float *h_dataA, *h_dataB, *h_resultC; float *d_dataA, *d_dataB, *d_resultC; double gpuTime; int i; timeval start, end; printf("Initializing data...\n"); h_dataA = (float *)malloc(sizeof(float) * MAX_DATA_SIZE); h_dataB = (float *)malloc(sizeof(float) * MAX_DATA_SIZE); h_resultC = (float *)malloc(sizeof(float) * MAX_DATA_SIZE); hipMalloc( (void **)&d_dataA, sizeof(float) * MAX_DATA_SIZE) ; hipMalloc( (void **)&d_dataB, sizeof(float) * MAX_DATA_SIZE) ; hipMalloc( (void **)&d_resultC , sizeof(float) * MAX_DATA_SIZE) ; srand(123); for(i = 0; i < MAX_DATA_SIZE; i++) { h_dataA[i] = (float)rand() / (float)RAND_MAX; h_dataB[i] = (float)rand() / (float)RAND_MAX; } int firstRun = 1; // Indicates if it's the first execution of the for loop const int useGPU = 1; // When 0, only the CPU is used. When 1, only the GPU is used for (int dataAmount = MAX_DATA_SIZE; dataAmount > 128*256; dataAmount /= 2) { int blockGridWidth = 128; int blockGridHeight = (dataAmount / 256) / blockGridWidth; dim3 blockGridRows(blockGridWidth, blockGridHeight); dim3 threadBlockRows(256, 1); // Start the timer. 
// We want to measure copying data, running the kernel, and copying the results back to host gettimeofday(&start, NULL); if (useGPU == 0) { // Copy the data to the device hipMemcpy(d_dataA, h_dataA, sizeof(float) * dataAmount, hipMemcpyHostToDevice) ; hipMemcpy(d_dataB, h_dataB, sizeof(float) * dataAmount, hipMemcpyHostToDevice) ; // Do the multiplication on the GPU multiplyNumbersGPU<<<blockGridRows, threadBlockRows>>>(d_dataA, d_dataB, d_resultC); hipDeviceSynchronize() ; // Copy the data back to the host hipMemcpy(h_resultC, d_resultC, sizeof(float) * dataAmount, hipMemcpyDeviceToHost) ; } else { // We're using the CPU only GoldenBrick(h_dataA, h_dataB, h_resultC, dataAmount); } // Stop the timer, print the total round trip execution time. gettimeofday(&end, NULL); gpuTime = myDiffTime(start, end); if (!firstRun || !useGPU) { printf("Elements: %d - convolution time : %f msec - %f Multiplications/sec\n", dataAmount, gpuTime, blockGridHeight * 128 * 256 / (gpuTime * 0.001)); } else { firstRun = 0; // We discard the results of the first run because of the extra overhead incurred // during the first time a kernel is ever executed. dataAmount *= 2; // reset to first run value } } printf("Cleaning up...\n"); hipFree(d_resultC ) ; hipFree(d_dataB) ; hipFree(d_dataA) ; free(h_resultC); free(h_dataB); free(h_dataA); }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z18multiplyNumbersGPUPfS_S_ .globl _Z18multiplyNumbersGPUPfS_S_ .p2align 8 .type _Z18multiplyNumbersGPUPfS_S_,@function _Z18multiplyNumbersGPUPfS_S_: s_load_b128 s[4:7], s[0:1], 0x0 s_lshl_b32 s2, s15, 15 s_lshl_b32 s3, s14, 8 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_add3_u32 v0, s2, s3, v0 s_mov_b32 s3, exec_lo v_ashrrev_i32_e32 v1, 31, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[3:4], 2, v[0:1] s_waitcnt lgkmcnt(0) v_add_co_u32 v5, vcc_lo, s4, v3 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v6, vcc_lo, s5, v4, vcc_lo v_add_co_u32 v3, vcc_lo, s6, v3 v_add_co_ci_u32_e32 v4, vcc_lo, s7, v4, vcc_lo global_load_b32 v2, v[5:6], off global_load_b32 v4, v[3:4], off s_waitcnt vmcnt(1) v_and_b32_e32 v3, 0x7fffffff, v2 v_cmpx_ngt_f32_e64 0x48000000, |v2| s_xor_b32 s4, exec_lo, s3 s_cbranch_execz .LBB0_2 s_mov_b32 s2, 0x7fffff v_mov_b32_e32 v7, 0 v_and_or_b32 v15, v3, s2, 0x800000 v_lshrrev_b32_e32 v12, 23, v3 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_mad_u64_u32 v[5:6], null, v15, 0xfe5163ab, 0 v_add_nc_u32_e32 v13, 0xffffff88, v12 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) v_cmp_lt_u32_e32 vcc_lo, 63, v13 v_mad_u64_u32 v[8:9], null, v15, 0x3c439041, v[6:7] v_cndmask_b32_e64 v14, 0, 0xffffffc0, vcc_lo s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_mov_b32_e32 v6, v9 v_add_nc_u32_e32 v14, v14, v13 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_mad_u64_u32 v[9:10], null, v15, 0xdb629599, v[6:7] v_cmp_lt_u32_e64 s2, 31, v14 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) v_cndmask_b32_e64 v16, 0, 0xffffffe0, s2 v_dual_mov_b32 v6, v10 :: v_dual_cndmask_b32 v5, v9, v5 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_nc_u32_e32 v16, v16, v14 v_mad_u64_u32 v[10:11], null, 
v15, 0xf534ddc0, v[6:7] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_cmp_lt_u32_e64 s3, 31, v16 v_mov_b32_e32 v6, v11 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) v_cndmask_b32_e32 v8, v10, v8, vcc_lo v_mad_u64_u32 v[11:12], null, v15, 0xfc2757d1, v[6:7] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_cndmask_b32_e64 v5, v8, v5, s2 v_mov_b32_e32 v6, v12 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mad_u64_u32 v[12:13], null, v15, 0x4e441529, v[6:7] v_mov_b32_e32 v6, v13 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[13:14], null, v15, 0xa2f9836e, v[6:7] v_cndmask_b32_e64 v6, 0, 0xffffffe0, s3 v_dual_cndmask_b32 v7, v12, v10 :: v_dual_add_nc_u32 v6, v6, v16 s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_dual_cndmask_b32 v13, v13, v11 :: v_dual_cndmask_b32 v12, v14, v12 v_cndmask_b32_e32 v11, v11, v9, vcc_lo v_cmp_eq_u32_e32 vcc_lo, 0, v6 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4) v_cndmask_b32_e64 v10, v13, v7, s2 v_cndmask_b32_e64 v12, v12, v13, s2 s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4) v_cndmask_b32_e64 v7, v7, v11, s2 v_sub_nc_u32_e32 v13, 32, v6 v_cndmask_b32_e64 v11, v11, v8, s2 v_cndmask_b32_e64 v12, v12, v10, s3 s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) v_cndmask_b32_e64 v10, v10, v7, s3 v_cndmask_b32_e64 v7, v7, v11, s3 v_cndmask_b32_e64 v5, v11, v5, s3 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_alignbit_b32 v14, v12, v10, v13 v_alignbit_b32 v9, v10, v7, v13 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4) v_cndmask_b32_e32 v6, v14, v12, vcc_lo v_alignbit_b32 v12, v7, v5, v13 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_cndmask_b32_e32 v8, v9, v10, vcc_lo v_bfe_u32 v9, v6, 29, 1 s_delay_alu instid0(VALU_DEP_3) | 
instskip(NEXT) | instid1(VALU_DEP_3) v_cndmask_b32_e32 v7, v12, v7, vcc_lo v_alignbit_b32 v10, v6, v8, 30 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_sub_nc_u32_e32 v11, 0, v9 v_alignbit_b32 v8, v8, v7, 30 v_alignbit_b32 v5, v7, v5, 30 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_xor_b32_e32 v10, v10, v11 v_xor_b32_e32 v7, v8, v11 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_xor_b32_e32 v5, v5, v11 v_clz_i32_u32_e32 v12, v10 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_min_u32_e32 v12, 32, v12 v_sub_nc_u32_e32 v8, 31, v12 v_lshlrev_b32_e32 v14, 23, v12 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2) v_alignbit_b32 v10, v10, v7, v8 v_alignbit_b32 v5, v7, v5, v8 v_lshrrev_b32_e32 v8, 29, v6 v_alignbit_b32 v7, v10, v5, 9 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_lshlrev_b32_e32 v8, 31, v8 v_lshrrev_b32_e32 v10, 9, v10 v_clz_i32_u32_e32 v11, v7 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) v_or_b32_e32 v13, 0.5, v8 v_min_u32_e32 v11, 32, v11 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_sub_nc_u32_e32 v13, v13, v14 v_sub_nc_u32_e32 v15, 31, v11 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) v_alignbit_b32 v5, v7, v5, v15 v_or_b32_e32 v7, v10, v13 v_add_lshl_u32 v10, v11, v12, 23 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_lshrrev_b32_e32 v5, 9, v5 v_mul_f32_e32 v11, 0x3fc90fda, v7 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_sub_nc_u32_e32 v5, v5, v10 v_fma_f32 v10, v7, 0x3fc90fda, -v11 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_nc_u32_e32 v5, 0x33000000, v5 v_fmamk_f32 v7, v7, 0x33a22168, v10 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_or_b32_e32 v5, v5, v8 v_fmac_f32_e32 v7, 0x3fc90fda, v5 v_lshrrev_b32_e32 
v6, 30, v6 s_delay_alu instid0(VALU_DEP_1) v_dual_add_f32 v5, v11, v7 :: v_dual_add_nc_u32 v6, v9, v6 .LBB0_2: s_and_not1_saveexec_b32 s2, s4 v_mul_f32_e64 v5, 0x3f22f983, |v2| s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_rndne_f32_e32 v6, v5 v_fma_f32 v5, v6, 0xbfc90fda, |v2| s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fmamk_f32 v5, v6, 0xb3a22168, v5 v_fmamk_f32 v5, v6, 0xa7c234c4, v5 v_cvt_i32_f32_e32 v6, v6 s_or_b32 exec_lo, exec_lo, s2 s_waitcnt vmcnt(0) v_mul_f32_e32 v4, v2, v4 s_mov_b32 s3, 0x4028b0fb s_mov_b32 s2, 0xa8826aa9 v_xor_b32_e32 v3, v3, v2 s_load_b64 s[0:1], s[0:1], 0x10 v_cvt_f64_f32_e32 v[7:8], v4 v_lshlrev_b64 v[0:1], 2, v[0:1] s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_div_scale_f64 v[9:10], null, s[2:3], s[2:3], v[7:8] v_div_scale_f64 v[15:16], vcc_lo, v[7:8], s[2:3], v[7:8] v_rcp_f64_e32 v[11:12], v[9:10] s_waitcnt_depctr 0xfff v_fma_f64 v[13:14], -v[9:10], v[11:12], 1.0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fma_f64 v[11:12], v[11:12], v[13:14], v[11:12] v_fma_f64 v[13:14], -v[9:10], v[11:12], 1.0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fma_f64 v[11:12], v[11:12], v[13:14], v[11:12] v_mul_f64 v[13:14], v[15:16], v[11:12] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fma_f64 v[9:10], -v[9:10], v[13:14], v[15:16] v_div_fmas_f64 v[9:10], v[9:10], v[11:12], v[13:14] s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) v_div_fixup_f64 v[7:8], v[9:10], s[2:3], v[7:8] s_mov_b32 s2, 0xb94c1982 s_mov_b32 s3, 0x37d75334 v_cmp_gt_f64_e32 vcc_lo, 0x10000000, v[7:8] v_cndmask_b32_e64 v4, 0, 1, vcc_lo s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b32_e32 v4, 8, v4 v_ldexp_f64 v[7:8], v[7:8], v4 v_mul_f32_e32 v4, v5, v5 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1) v_rsq_f64_e32 v[9:10], v[7:8] 
s_waitcnt_depctr 0xfff v_mul_f64 v[11:12], v[7:8], v[9:10] v_mul_f64 v[9:10], v[9:10], 0.5 v_fma_f64 v[13:14], -v[9:10], v[11:12], 0.5 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_fma_f64 v[11:12], v[11:12], v[13:14], v[11:12] v_fma_f64 v[9:10], v[9:10], v[13:14], v[9:10] v_fma_f64 v[13:14], -v[11:12], v[11:12], v[7:8] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fma_f64 v[11:12], v[13:14], v[9:10], v[11:12] v_fma_f64 v[13:14], -v[11:12], v[11:12], v[7:8] s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2) v_fma_f64 v[9:10], v[13:14], v[9:10], v[11:12] v_fmaak_f32 v11, s2, v4, 0x3c0881c4 v_and_b32_e32 v13, 1, v6 v_dual_fmaak_f32 v11, v4, v11, 0xbe2aaa9d :: v_dual_lshlrev_b32 v6, 30, v6 v_fmaak_f32 v12, s3, v4, 0xbab64f3b s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_cmp_eq_u32_e64 s2, 0, v13 v_dual_mul_f32 v11, v4, v11 :: v_dual_and_b32 v6, 0x80000000, v6 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_dual_fmaak_f32 v12, v4, v12, 0x3d2aabf7 :: v_dual_fmac_f32 v5, v5, v11 v_fmaak_f32 v12, v4, v12, 0xbf000004 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fma_f32 v4, v4, v12, 1.0 v_cndmask_b32_e64 v4, v4, v5, s2 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3) v_xor3_b32 v3, v3, v6, v4 v_cndmask_b32_e64 v4, 0, 0xffffff80, vcc_lo v_cmp_class_f32_e64 vcc_lo, v2, 0x1f8 v_cndmask_b32_e32 v5, 0x7fc00000, v3, vcc_lo s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_ldexp_f64 v[2:3], v[9:10], v4 v_cmp_class_f64_e64 vcc_lo, v[7:8], 0x260 v_cvt_f64_f32_e32 v[4:5], v5 s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3) v_dual_cndmask_b32 v3, v3, v8 :: v_dual_cndmask_b32 v2, v2, v7 s_waitcnt lgkmcnt(0) v_add_co_u32 v0, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo v_mul_f64 v[2:3], v[2:3], v[4:5] s_delay_alu instid0(VALU_DEP_1) 
v_cvt_f32_f64_e32 v2, v[2:3] global_store_b32 v[0:1], v2, off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z18multiplyNumbersGPUPfS_S_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 24 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 17 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z18multiplyNumbersGPUPfS_S_, .Lfunc_end0-_Z18multiplyNumbersGPUPfS_S_ .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: 
global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 24 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z18multiplyNumbersGPUPfS_S_ .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z18multiplyNumbersGPUPfS_S_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 17 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include <stdio.h> #include <sys/time.h> /* This file can be downloaded from supercomputingblog.com. This is part of a series of tutorials that demonstrate how to use CUDA The tutorials will also demonstrate the speed of using CUDA */ // IMPORTANT NOTE: for this data size, your graphics card should have at least 512 megabytes of memory. // If your GPU has less memory, then you will need to decrease this data size. #define MAX_DATA_SIZE 1024*1024*32 // about 32 million elements. // The max data size must be an integer multiple of 128*256, because each block will have 256 threads, // and the block grid width will be 128. These are arbitrary numbers I choose. void get_walltime(double* wcTime) { struct timeval tp; gettimeofday(&tp, NULL); *wcTime = (double)(tp.tv_sec + tp.tv_usec/1000000.0); } double myDiffTime(struct timeval &start, struct timeval &end) { double d_start, d_end; d_start = (double)(start.tv_sec + start.tv_usec/1000000.0); d_end = (double)(end.tv_sec + end.tv_usec/1000000.0); return (d_end - d_start); } void GoldenBrick(float *pA, float *pB, float *pResult, int count) { for (int i=0; i < count; i++) { //pResult[count] = pA[count] * pB[count]; //pResult[count] = pA[count] * pB[count] / 12.34567; //pResult[count] = sqrt(pA[count] * pB[count] / 12.34567); pResult[count] = sqrt(pA[count] * pB[count] / 12.34567) * sin(pA[count]); } } __global__ void multiplyNumbersGPU(float *pDataA, float *pDataB, float *pResult) { // Because of the simplicity of this tutorial, we are going to assume that // every block has 256 threads. Each thread simply multiplies two numbers, // and then stores the result. // The grid of blocks is 128 blocks long. int tid = (blockIdx.y * 128 * 256) + blockIdx.x * 256 + threadIdx.x; // This gives every thread a unique ID. // By no coincidence, we'll be using this thread ID to determine which data elements to multiply. //pResult[tid] = pDataA[tid] * pDataB[tid]; // Each thread only multiplies one data element. 
//pResult[tid] = pDataA[tid] * pDataB[tid] / 12.34567; //pResult[tid] = sqrt(pDataA[tid] * pDataB[tid] / 12.34567); pResult[tid] = sqrt(pDataA[tid] * pDataB[tid] / 12.34567) * sin(pDataA[tid]); } //////////////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv){ float *h_dataA, *h_dataB, *h_resultC; float *d_dataA, *d_dataB, *d_resultC; double gpuTime; int i; timeval start, end; printf("Initializing data...\n"); h_dataA = (float *)malloc(sizeof(float) * MAX_DATA_SIZE); h_dataB = (float *)malloc(sizeof(float) * MAX_DATA_SIZE); h_resultC = (float *)malloc(sizeof(float) * MAX_DATA_SIZE); hipMalloc( (void **)&d_dataA, sizeof(float) * MAX_DATA_SIZE) ; hipMalloc( (void **)&d_dataB, sizeof(float) * MAX_DATA_SIZE) ; hipMalloc( (void **)&d_resultC , sizeof(float) * MAX_DATA_SIZE) ; srand(123); for(i = 0; i < MAX_DATA_SIZE; i++) { h_dataA[i] = (float)rand() / (float)RAND_MAX; h_dataB[i] = (float)rand() / (float)RAND_MAX; } int firstRun = 1; // Indicates if it's the first execution of the for loop const int useGPU = 1; // When 0, only the CPU is used. When 1, only the GPU is used for (int dataAmount = MAX_DATA_SIZE; dataAmount > 128*256; dataAmount /= 2) { int blockGridWidth = 128; int blockGridHeight = (dataAmount / 256) / blockGridWidth; dim3 blockGridRows(blockGridWidth, blockGridHeight); dim3 threadBlockRows(256, 1); // Start the timer. 
// We want to measure copying data, running the kernel, and copying the results back to host gettimeofday(&start, NULL); if (useGPU == 0) { // Copy the data to the device hipMemcpy(d_dataA, h_dataA, sizeof(float) * dataAmount, hipMemcpyHostToDevice) ; hipMemcpy(d_dataB, h_dataB, sizeof(float) * dataAmount, hipMemcpyHostToDevice) ; // Do the multiplication on the GPU multiplyNumbersGPU<<<blockGridRows, threadBlockRows>>>(d_dataA, d_dataB, d_resultC); hipDeviceSynchronize() ; // Copy the data back to the host hipMemcpy(h_resultC, d_resultC, sizeof(float) * dataAmount, hipMemcpyDeviceToHost) ; } else { // We're using the CPU only GoldenBrick(h_dataA, h_dataB, h_resultC, dataAmount); } // Stop the timer, print the total round trip execution time. gettimeofday(&end, NULL); gpuTime = myDiffTime(start, end); if (!firstRun || !useGPU) { printf("Elements: %d - convolution time : %f msec - %f Multiplications/sec\n", dataAmount, gpuTime, blockGridHeight * 128 * 256 / (gpuTime * 0.001)); } else { firstRun = 0; // We discard the results of the first run because of the extra overhead incurred // during the first time a kernel is ever executed. dataAmount *= 2; // reset to first run value } } printf("Cleaning up...\n"); hipFree(d_resultC ) ; hipFree(d_dataB) ; hipFree(d_dataA) ; free(h_resultC); free(h_dataB); free(h_dataA); }
.text .file "cuda_tutorial11.hip" .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function _Z12get_walltimePd .LCPI0_0: .quad 0x412e848000000000 # double 1.0E+6 .text .globl _Z12get_walltimePd .p2align 4, 0x90 .type _Z12get_walltimePd,@function _Z12get_walltimePd: # @_Z12get_walltimePd .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $16, %rsp .cfi_def_cfa_offset 32 .cfi_offset %rbx, -16 movq %rdi, %rbx movq %rsp, %rdi xorl %esi, %esi callq gettimeofday cvtsi2sdq (%rsp), %xmm0 cvtsi2sdq 8(%rsp), %xmm1 divsd .LCPI0_0(%rip), %xmm1 addsd %xmm0, %xmm1 movsd %xmm1, (%rbx) addq $16, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 retq .Lfunc_end0: .size _Z12get_walltimePd, .Lfunc_end0-_Z12get_walltimePd .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function _Z10myDiffTimeR7timevalS0_ .LCPI1_0: .quad 0x412e848000000000 # double 1.0E+6 .text .globl _Z10myDiffTimeR7timevalS0_ .p2align 4, 0x90 .type _Z10myDiffTimeR7timevalS0_,@function _Z10myDiffTimeR7timevalS0_: # @_Z10myDiffTimeR7timevalS0_ .cfi_startproc # %bb.0: cvtsi2sdq (%rdi), %xmm0 cvtsi2sdq 8(%rdi), %xmm1 movsd .LCPI1_0(%rip), %xmm2 # xmm2 = mem[0],zero divsd %xmm2, %xmm1 addsd %xmm0, %xmm1 cvtsi2sdq (%rsi), %xmm3 xorps %xmm0, %xmm0 cvtsi2sdq 8(%rsi), %xmm0 divsd %xmm2, %xmm0 addsd %xmm3, %xmm0 subsd %xmm1, %xmm0 retq .Lfunc_end1: .size _Z10myDiffTimeR7timevalS0_, .Lfunc_end1-_Z10myDiffTimeR7timevalS0_ .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function _Z11GoldenBrickPfS_S_i .LCPI2_0: .quad 0x4028b0fba8826aa9 # double 12.34567 .LCPI2_1: .quad 0x0000000000000000 # double 0 .text .globl _Z11GoldenBrickPfS_S_i .p2align 4, 0x90 .type _Z11GoldenBrickPfS_S_i,@function _Z11GoldenBrickPfS_S_i: # @_Z11GoldenBrickPfS_S_i .cfi_startproc # %bb.0: testl %ecx, %ecx jle .LBB2_7 # %bb.1: # %.lr.ph pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r13 
.cfi_def_cfa_offset 32 pushq %r12 .cfi_def_cfa_offset 40 pushq %rbx .cfi_def_cfa_offset 48 subq $16, %rsp .cfi_def_cfa_offset 64 .cfi_offset %rbx, -48 .cfi_offset %r12, -40 .cfi_offset %r13, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movl %ecx, %ebx movq %rdx, %r14 movq %rsi, %r15 movq %rdi, %r12 movl %ecx, %r13d jmp .LBB2_2 .p2align 4, 0x90 .LBB2_3: # in Loop: Header=BB2_2 Depth=1 sqrtsd %xmm0, %xmm0 .LBB2_5: # %.split # in Loop: Header=BB2_2 Depth=1 movsd %xmm0, 8(%rsp) # 8-byte Spill movss (%r12,%r13,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 callq sin mulsd 8(%rsp), %xmm0 # 8-byte Folded Reload cvtsd2ss %xmm0, %xmm0 movss %xmm0, (%r14,%r13,4) decl %ebx je .LBB2_6 .LBB2_2: # =>This Inner Loop Header: Depth=1 movss (%r12,%r13,4), %xmm0 # xmm0 = mem[0],zero,zero,zero mulss (%r15,%r13,4), %xmm0 cvtss2sd %xmm0, %xmm0 divsd .LCPI2_0(%rip), %xmm0 ucomisd .LCPI2_1(%rip), %xmm0 jae .LBB2_3 # %bb.4: # %call.sqrt # in Loop: Header=BB2_2 Depth=1 callq sqrt jmp .LBB2_5 .LBB2_6: addq $16, %rsp .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 .cfi_restore %rbx .cfi_restore %r12 .cfi_restore %r13 .cfi_restore %r14 .cfi_restore %r15 .LBB2_7: # %._crit_edge retq .Lfunc_end2: .size _Z11GoldenBrickPfS_S_i, .Lfunc_end2-_Z11GoldenBrickPfS_S_i .cfi_endproc # -- End function .globl _Z33__device_stub__multiplyNumbersGPUPfS_S_ # -- Begin function _Z33__device_stub__multiplyNumbersGPUPfS_S_ .p2align 4, 0x90 .type _Z33__device_stub__multiplyNumbersGPUPfS_S_,@function _Z33__device_stub__multiplyNumbersGPUPfS_S_: # @_Z33__device_stub__multiplyNumbersGPUPfS_S_ .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 
24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z18multiplyNumbersGPUPfS_S_, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end3: .size _Z33__device_stub__multiplyNumbersGPUPfS_S_, .Lfunc_end3-_Z33__device_stub__multiplyNumbersGPUPfS_S_ .cfi_endproc # -- End function .section .rodata.cst4,"aM",@progbits,4 .p2align 2, 0x0 # -- Begin function main .LCPI4_0: .long 0x30000000 # float 4.65661287E-10 .LCPI4_3: .long 0x7f800000 # float +Inf .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 .LCPI4_1: .quad 0x4028b0fba8826aa9 # double 12.34567 .LCPI4_4: .quad 0x412e848000000000 # double 1.0E+6 .LCPI4_5: .quad 0x3f50624dd2f1a9fc # double 0.001 .section .rodata.cst16,"aM",@progbits,16 .p2align 4, 0x0 .LCPI4_2: .long 0x7fffffff # float NaN .long 0x7fffffff # float NaN .long 0x7fffffff # float NaN .long 0x7fffffff # float NaN .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $88, %rsp .cfi_def_cfa_offset 144 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl $.Lstr, %edi callq puts@PLT movl $134217728, %edi # imm = 0x8000000 callq malloc movq %rax, %rbx movl $134217728, %edi # imm = 0x8000000 callq malloc movq %rax, %r14 leaq 48(%rsp), %rdi movl $134217728, %esi # imm = 0x8000000 callq hipMalloc leaq 40(%rsp), %rdi movl $134217728, %esi # imm = 0x8000000 callq hipMalloc leaq 32(%rsp), %rdi movl $134217728, %esi # imm = 0x8000000 callq hipMalloc movl $123, %edi callq srand xorl %r15d, 
%r15d .p2align 4, 0x90 .LBB4_1: # =>This Inner Loop Header: Depth=1 callq rand xorps %xmm0, %xmm0 cvtsi2ss %eax, %xmm0 movss .LCPI4_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero mulss %xmm1, %xmm0 movss %xmm0, (%rbx,%r15,4) callq rand xorps %xmm0, %xmm0 cvtsi2ss %eax, %xmm0 mulss .LCPI4_0(%rip), %xmm0 movss %xmm0, (%r14,%r15,4) incq %r15 cmpq $33554432, %r15 # imm = 0x2000000 jne .LBB4_1 # %bb.2: # %.preheader movl $33554432, %r15d # imm = 0x2000000 xorl %r13d, %r13d leaq 56(%rsp), %r12 jmp .LBB4_3 .p2align 4, 0x90 .LBB4_12: # in Loop: Header=BB4_3 Depth=1 addl %r15d, %r15d .LBB4_13: # in Loop: Header=BB4_3 Depth=1 movl %r15d, %eax shrl %eax movb $1, %r13b cmpl $65537, %r15d # imm = 0x10001 movl %eax, %r15d jbe .LBB4_14 .LBB4_3: # %.lr.ph.i # =>This Loop Header: Depth=1 # Child Loop BB4_4 Depth 2 leaq 72(%rsp), %rdi xorl %esi, %esi callq gettimeofday movss .LCPI4_3(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero movl %r15d, %eax movss (%rbx,%rax,4), %xmm2 # xmm2 = mem[0],zero,zero,zero movss (%r14,%rax,4), %xmm0 # xmm0 = mem[0],zero,zero,zero mulss %xmm2, %xmm0 xorps %xmm3, %xmm3 cvtss2sd %xmm0, %xmm3 divsd .LCPI4_1(%rip), %xmm3 xorps %xmm4, %xmm4 cvtss2sd %xmm2, %xmm4 andps .LCPI4_2(%rip), %xmm2 movl %r15d, %ebp xorps %xmm0, %xmm0 .p2align 4, 0x90 .LBB4_4: # Parent Loop BB4_3 Depth=1 # => This Inner Loop Header: Depth=2 ucomisd %xmm3, %xmm0 ja .LBB4_5 .LBB4_7: # %cdce.end # in Loop: Header=BB4_4 Depth=2 ucomiss %xmm1, %xmm2 jae .LBB4_8 # %bb.9: # %cdce.end33 # in Loop: Header=BB4_4 Depth=2 decl %ebp jne .LBB4_4 jmp .LBB4_10 .LBB4_5: # %cdce.call # in Loop: Header=BB4_4 Depth=2 ucomisd %xmm0, %xmm3 jae .LBB4_7 # %bb.6: # %call.sqrt # in Loop: Header=BB4_4 Depth=2 movapd %xmm3, %xmm0 movaps %xmm2, 16(%rsp) # 16-byte Spill movsd %xmm3, 8(%rsp) # 8-byte Spill movsd %xmm4, (%rsp) # 8-byte Spill callq sqrt movsd (%rsp), %xmm4 # 8-byte Reload # xmm4 = mem[0],zero movsd 8(%rsp), %xmm3 # 8-byte Reload # xmm3 = mem[0],zero movaps 16(%rsp), %xmm2 # 16-byte Reload movss 
.LCPI4_3(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero xorpd %xmm0, %xmm0 jmp .LBB4_7 .LBB4_8: # %cdce.call32 # in Loop: Header=BB4_4 Depth=2 movaps %xmm4, %xmm0 movaps %xmm2, 16(%rsp) # 16-byte Spill movsd %xmm3, 8(%rsp) # 8-byte Spill movsd %xmm4, (%rsp) # 8-byte Spill callq sin movsd (%rsp), %xmm4 # 8-byte Reload # xmm4 = mem[0],zero movsd 8(%rsp), %xmm3 # 8-byte Reload # xmm3 = mem[0],zero movaps 16(%rsp), %xmm2 # 16-byte Reload movss .LCPI4_3(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero xorps %xmm0, %xmm0 decl %ebp jne .LBB4_4 .p2align 4, 0x90 .LBB4_10: # %_Z11GoldenBrickPfS_S_i.exit # in Loop: Header=BB4_3 Depth=1 movq %r12, %rdi xorl %esi, %esi callq gettimeofday testb $1, %r13b je .LBB4_12 # %bb.11: # in Loop: Header=BB4_3 Depth=1 xorps %xmm1, %xmm1 cvtsi2sdq 64(%rsp), %xmm1 xorps %xmm0, %xmm0 cvtsi2sdq 56(%rsp), %xmm0 movsd .LCPI4_4(%rip), %xmm2 # xmm2 = mem[0],zero divsd %xmm2, %xmm1 addsd %xmm1, %xmm0 xorps %xmm1, %xmm1 cvtsi2sdq 80(%rsp), %xmm1 divsd %xmm2, %xmm1 xorps %xmm2, %xmm2 cvtsi2sdq 72(%rsp), %xmm2 addsd %xmm1, %xmm2 movl %r15d, %eax andl $2147450880, %eax # imm = 0x7FFF8000 xorps %xmm1, %xmm1 cvtsi2sd %eax, %xmm1 subsd %xmm2, %xmm0 movapd %xmm0, %xmm2 mulsd .LCPI4_5(%rip), %xmm2 divsd %xmm2, %xmm1 movl $.L.str.1, %edi movl %r15d, %esi movb $2, %al callq printf jmp .LBB4_13 .LBB4_14: movl $.Lstr.1, %edi callq puts@PLT movq 32(%rsp), %rdi callq hipFree movq 40(%rsp), %rdi callq hipFree movq 48(%rsp), %rdi callq hipFree movq %r14, %rdi callq free movq %rbx, %rdi callq free xorl %eax, %eax addq $88, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end4: .size main, .Lfunc_end4-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: 
subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB5_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB5_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z18multiplyNumbersGPUPfS_S_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end5: .size __hip_module_ctor, .Lfunc_end5-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB6_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB6_2: retq .Lfunc_end6: .size __hip_module_dtor, .Lfunc_end6-__hip_module_dtor .cfi_endproc # -- End function .type _Z18multiplyNumbersGPUPfS_S_,@object # @_Z18multiplyNumbersGPUPfS_S_ .section .rodata,"a",@progbits .globl _Z18multiplyNumbersGPUPfS_S_ .p2align 3, 0x0 _Z18multiplyNumbersGPUPfS_S_: .quad _Z33__device_stub__multiplyNumbersGPUPfS_S_ .size _Z18multiplyNumbersGPUPfS_S_, 8 .type .L.str.1,@object # @.str.1 .section .rodata.str1.1,"aMS",@progbits,1 .L.str.1: .asciz "Elements: %d - convolution time : %f msec - %f Multiplications/sec\n" .size .L.str.1, 68 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z18multiplyNumbersGPUPfS_S_" .size .L__unnamed_1, 29 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local 
__hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "Initializing data..." .size .Lstr, 21 .type .Lstr.1,@object # @str.1 .Lstr.1: .asciz "Cleaning up..." .size .Lstr.1, 15 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z33__device_stub__multiplyNumbersGPUPfS_S_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z18multiplyNumbersGPUPfS_S_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_0015c6dc_00000000-6_cuda_tutorial11.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2063: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2063: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z12get_walltimePd .type _Z12get_walltimePd, @function _Z12get_walltimePd: .LFB2057: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 subq $32, %rsp .cfi_def_cfa_offset 48 movq %rdi, %rbx movq %fs:40, %rax movq %rax, 24(%rsp) xorl %eax, %eax movq %rsp, %rdi movl $0, %esi call gettimeofday@PLT pxor %xmm0, %xmm0 cvtsi2sdq 8(%rsp), %xmm0 divsd .LC0(%rip), %xmm0 pxor %xmm1, %xmm1 cvtsi2sdq (%rsp), %xmm1 addsd %xmm1, %xmm0 movsd %xmm0, (%rbx) movq 24(%rsp), %rax subq %fs:40, %rax jne .L6 addq $32, %rsp .cfi_remember_state .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 ret .L6: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size _Z12get_walltimePd, .-_Z12get_walltimePd .globl _Z10myDiffTimeR7timevalS0_ .type _Z10myDiffTimeR7timevalS0_, @function _Z10myDiffTimeR7timevalS0_: .LFB2058: .cfi_startproc endbr64 pxor %xmm0, %xmm0 cvtsi2sdq 8(%rsi), %xmm0 movsd .LC0(%rip), %xmm2 divsd %xmm2, %xmm0 pxor %xmm1, %xmm1 cvtsi2sdq (%rsi), %xmm1 addsd %xmm1, %xmm0 pxor %xmm1, %xmm1 cvtsi2sdq 8(%rdi), %xmm1 divsd %xmm2, %xmm1 pxor %xmm2, %xmm2 cvtsi2sdq (%rdi), %xmm2 addsd %xmm2, %xmm1 subsd %xmm1, %xmm0 ret .cfi_endproc .LFE2058: .size _Z10myDiffTimeR7timevalS0_, .-_Z10myDiffTimeR7timevalS0_ .globl _Z11GoldenBrickPfS_S_i .type _Z11GoldenBrickPfS_S_i, @function _Z11GoldenBrickPfS_S_i: .LFB2059: .cfi_startproc endbr64 testl %ecx, %ecx jle .L18 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 
32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $24, %rsp .cfi_def_cfa_offset 80 movl %ecx, %ebp movslq %ecx, %rax salq $2, %rax leaq (%rdi,%rax), %r14 leaq (%rsi,%rax), %r13 leaq (%rdx,%rax), %r12 movl $0, %ebx .L13: movl (%r14), %r15d movd %r15d, %xmm0 mulss 0(%r13), %xmm0 cvtss2sd %xmm0, %xmm0 divsd .LC1(%rip), %xmm0 pxor %xmm1, %xmm1 ucomisd %xmm0, %xmm1 ja .L16 sqrtsd %xmm0, %xmm0 movsd %xmm0, 8(%rsp) .L12: movd %r15d, %xmm0 call sinf@PLT cvtss2sd %xmm0, %xmm0 mulsd 8(%rsp), %xmm0 cvtsd2ss %xmm0, %xmm0 movss %xmm0, (%r12) addl $1, %ebx cmpl %ebx, %ebp jne .L13 addq $24, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L16: .cfi_restore_state call sqrt@PLT movsd %xmm0, 8(%rsp) jmp .L12 .L18: .cfi_def_cfa_offset 8 .cfi_restore 3 .cfi_restore 6 .cfi_restore 12 .cfi_restore 13 .cfi_restore 14 .cfi_restore 15 ret .cfi_endproc .LFE2059: .size _Z11GoldenBrickPfS_S_i, .-_Z11GoldenBrickPfS_S_i .section .rodata.str1.1,"aMS",@progbits,1 .LC3: .string "Initializing data...\n" .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC6: .string "Elements: %d - convolution time : %f msec - %f Multiplications/sec\n" .section .rodata.str1.1 .LC7: .string "Cleaning up...\n" .text .globl main .type main, @function main: .LFB2060: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $104, %rsp .cfi_def_cfa_offset 160 movq %fs:40, %rax movq %rax, 88(%rsp) 
xorl %eax, %eax leaq .LC3(%rip), %rsi movl $2, %edi call __printf_chk@PLT movl $134217728, %edi call malloc@PLT movq %rax, %rbx movl $134217728, %edi call malloc@PLT movq %rax, %rbp movl $134217728, %edi call malloc@PLT movq %rax, %r14 leaq 24(%rsp), %rdi movl $134217728, %esi call cudaMalloc@PLT leaq 32(%rsp), %rdi movl $134217728, %esi call cudaMalloc@PLT leaq 40(%rsp), %rdi movl $134217728, %esi call cudaMalloc@PLT movl $123, %edi call srand@PLT movl $0, %r12d .L22: call rand@PLT pxor %xmm0, %xmm0 cvtsi2ssl %eax, %xmm0 mulss .LC4(%rip), %xmm0 movss %xmm0, (%rbx,%r12) call rand@PLT pxor %xmm0, %xmm0 cvtsi2ssl %eax, %xmm0 mulss .LC4(%rip), %xmm0 movss %xmm0, 0(%rbp,%r12) addq $4, %r12 cmpq $134217728, %r12 jne .L22 movl $1, %r13d movl $33554432, %r12d leaq 48(%rsp), %r15 leaq 64(%rsp), %rax movq %rax, 8(%rsp) jmp .L25 .L23: movl $0, %r13d .L25: movl $0, %esi movq %r15, %rdi call gettimeofday@PLT movl %r12d, %ecx movq %r14, %rdx movq %rbp, %rsi movq %rbx, %rdi call _Z11GoldenBrickPfS_S_i movl $0, %esi movq 8(%rsp), %rdi call gettimeofday@PLT testl %r13d, %r13d jne .L23 movq 8(%rsp), %rsi movq %r15, %rdi call _Z10myDiffTimeR7timevalS0_ leal 32767(%r12), %eax testl %r12d, %r12d cmovns %r12d, %eax andl $-32768, %eax pxor %xmm1, %xmm1 cvtsi2sdl %eax, %xmm1 movapd %xmm0, %xmm2 mulsd .LC5(%rip), %xmm2 divsd %xmm2, %xmm1 movl %r12d, %edx leaq .LC6(%rip), %rsi movl $2, %edi movl $2, %eax call __printf_chk@PLT cmpl $65537, %r12d jle .L24 movl %r12d, %eax shrl $31, %eax addl %r12d, %eax sarl %eax movl %eax, %r12d jmp .L23 .L24: leaq .LC7(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq 40(%rsp), %rdi call cudaFree@PLT movq 32(%rsp), %rdi call cudaFree@PLT movq 24(%rsp), %rdi call cudaFree@PLT movq %r14, %rdi call free@PLT movq %rbp, %rdi call free@PLT movq %rbx, %rdi call free@PLT movq 88(%rsp), %rax subq %fs:40, %rax jne .L29 movl $0, %eax addq $104, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp 
.cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L29: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2060: .size main, .-main .globl _Z42__device_stub__Z18multiplyNumbersGPUPfS_S_PfS_S_ .type _Z42__device_stub__Z18multiplyNumbersGPUPfS_S_PfS_S_, @function _Z42__device_stub__Z18multiplyNumbersGPUPfS_S_PfS_S_: .LFB2085: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L34 .L30: movq 120(%rsp), %rax subq %fs:40, %rax jne .L35 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L34: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z18multiplyNumbersGPUPfS_S_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L30 .L35: call __stack_chk_fail@PLT .cfi_endproc .LFE2085: .size _Z42__device_stub__Z18multiplyNumbersGPUPfS_S_PfS_S_, .-_Z42__device_stub__Z18multiplyNumbersGPUPfS_S_PfS_S_ .globl _Z18multiplyNumbersGPUPfS_S_ .type _Z18multiplyNumbersGPUPfS_S_, @function _Z18multiplyNumbersGPUPfS_S_: .LFB2086: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z42__device_stub__Z18multiplyNumbersGPUPfS_S_PfS_S_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2086: .size _Z18multiplyNumbersGPUPfS_S_, .-_Z18multiplyNumbersGPUPfS_S_ .section .rodata.str1.1 
.LC8: .string "_Z18multiplyNumbersGPUPfS_S_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2088: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC8(%rip), %rdx movq %rdx, %rcx leaq _Z18multiplyNumbersGPUPfS_S_(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2088: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC0: .long 0 .long 1093567616 .align 8 .LC1: .long -1467848023 .long 1076408571 .section .rodata.cst4,"aM",@progbits,4 .align 4 .LC4: .long 805306368 .section .rodata.cst8 .align 8 .LC5: .long -755914244 .long 1062232653 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "cuda_tutorial11.hip" .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function _Z12get_walltimePd .LCPI0_0: .quad 0x412e848000000000 # double 1.0E+6 .text .globl _Z12get_walltimePd .p2align 4, 0x90 .type _Z12get_walltimePd,@function _Z12get_walltimePd: # @_Z12get_walltimePd .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $16, %rsp .cfi_def_cfa_offset 32 .cfi_offset %rbx, -16 movq %rdi, %rbx movq %rsp, %rdi xorl %esi, %esi callq gettimeofday cvtsi2sdq (%rsp), %xmm0 cvtsi2sdq 8(%rsp), %xmm1 divsd .LCPI0_0(%rip), %xmm1 addsd %xmm0, %xmm1 movsd %xmm1, (%rbx) addq $16, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 retq .Lfunc_end0: .size _Z12get_walltimePd, .Lfunc_end0-_Z12get_walltimePd .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function _Z10myDiffTimeR7timevalS0_ .LCPI1_0: .quad 0x412e848000000000 # double 1.0E+6 .text .globl _Z10myDiffTimeR7timevalS0_ .p2align 4, 0x90 .type _Z10myDiffTimeR7timevalS0_,@function _Z10myDiffTimeR7timevalS0_: # @_Z10myDiffTimeR7timevalS0_ .cfi_startproc # %bb.0: cvtsi2sdq (%rdi), %xmm0 cvtsi2sdq 8(%rdi), %xmm1 movsd .LCPI1_0(%rip), %xmm2 # xmm2 = mem[0],zero divsd %xmm2, %xmm1 addsd %xmm0, %xmm1 cvtsi2sdq (%rsi), %xmm3 xorps %xmm0, %xmm0 cvtsi2sdq 8(%rsi), %xmm0 divsd %xmm2, %xmm0 addsd %xmm3, %xmm0 subsd %xmm1, %xmm0 retq .Lfunc_end1: .size _Z10myDiffTimeR7timevalS0_, .Lfunc_end1-_Z10myDiffTimeR7timevalS0_ .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function _Z11GoldenBrickPfS_S_i .LCPI2_0: .quad 0x4028b0fba8826aa9 # double 12.34567 .LCPI2_1: .quad 0x0000000000000000 # double 0 .text .globl _Z11GoldenBrickPfS_S_i .p2align 4, 0x90 .type _Z11GoldenBrickPfS_S_i,@function _Z11GoldenBrickPfS_S_i: # @_Z11GoldenBrickPfS_S_i .cfi_startproc # %bb.0: testl %ecx, %ecx jle .LBB2_7 # %bb.1: # %.lr.ph pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r13 
.cfi_def_cfa_offset 32 pushq %r12 .cfi_def_cfa_offset 40 pushq %rbx .cfi_def_cfa_offset 48 subq $16, %rsp .cfi_def_cfa_offset 64 .cfi_offset %rbx, -48 .cfi_offset %r12, -40 .cfi_offset %r13, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movl %ecx, %ebx movq %rdx, %r14 movq %rsi, %r15 movq %rdi, %r12 movl %ecx, %r13d jmp .LBB2_2 .p2align 4, 0x90 .LBB2_3: # in Loop: Header=BB2_2 Depth=1 sqrtsd %xmm0, %xmm0 .LBB2_5: # %.split # in Loop: Header=BB2_2 Depth=1 movsd %xmm0, 8(%rsp) # 8-byte Spill movss (%r12,%r13,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 callq sin mulsd 8(%rsp), %xmm0 # 8-byte Folded Reload cvtsd2ss %xmm0, %xmm0 movss %xmm0, (%r14,%r13,4) decl %ebx je .LBB2_6 .LBB2_2: # =>This Inner Loop Header: Depth=1 movss (%r12,%r13,4), %xmm0 # xmm0 = mem[0],zero,zero,zero mulss (%r15,%r13,4), %xmm0 cvtss2sd %xmm0, %xmm0 divsd .LCPI2_0(%rip), %xmm0 ucomisd .LCPI2_1(%rip), %xmm0 jae .LBB2_3 # %bb.4: # %call.sqrt # in Loop: Header=BB2_2 Depth=1 callq sqrt jmp .LBB2_5 .LBB2_6: addq $16, %rsp .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 .cfi_restore %rbx .cfi_restore %r12 .cfi_restore %r13 .cfi_restore %r14 .cfi_restore %r15 .LBB2_7: # %._crit_edge retq .Lfunc_end2: .size _Z11GoldenBrickPfS_S_i, .Lfunc_end2-_Z11GoldenBrickPfS_S_i .cfi_endproc # -- End function .globl _Z33__device_stub__multiplyNumbersGPUPfS_S_ # -- Begin function _Z33__device_stub__multiplyNumbersGPUPfS_S_ .p2align 4, 0x90 .type _Z33__device_stub__multiplyNumbersGPUPfS_S_,@function _Z33__device_stub__multiplyNumbersGPUPfS_S_: # @_Z33__device_stub__multiplyNumbersGPUPfS_S_ .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 
24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z18multiplyNumbersGPUPfS_S_, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end3: .size _Z33__device_stub__multiplyNumbersGPUPfS_S_, .Lfunc_end3-_Z33__device_stub__multiplyNumbersGPUPfS_S_ .cfi_endproc # -- End function .section .rodata.cst4,"aM",@progbits,4 .p2align 2, 0x0 # -- Begin function main .LCPI4_0: .long 0x30000000 # float 4.65661287E-10 .LCPI4_3: .long 0x7f800000 # float +Inf .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 .LCPI4_1: .quad 0x4028b0fba8826aa9 # double 12.34567 .LCPI4_4: .quad 0x412e848000000000 # double 1.0E+6 .LCPI4_5: .quad 0x3f50624dd2f1a9fc # double 0.001 .section .rodata.cst16,"aM",@progbits,16 .p2align 4, 0x0 .LCPI4_2: .long 0x7fffffff # float NaN .long 0x7fffffff # float NaN .long 0x7fffffff # float NaN .long 0x7fffffff # float NaN .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $88, %rsp .cfi_def_cfa_offset 144 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl $.Lstr, %edi callq puts@PLT movl $134217728, %edi # imm = 0x8000000 callq malloc movq %rax, %rbx movl $134217728, %edi # imm = 0x8000000 callq malloc movq %rax, %r14 leaq 48(%rsp), %rdi movl $134217728, %esi # imm = 0x8000000 callq hipMalloc leaq 40(%rsp), %rdi movl $134217728, %esi # imm = 0x8000000 callq hipMalloc leaq 32(%rsp), %rdi movl $134217728, %esi # imm = 0x8000000 callq hipMalloc movl $123, %edi callq srand xorl %r15d, 
%r15d .p2align 4, 0x90 .LBB4_1: # =>This Inner Loop Header: Depth=1 callq rand xorps %xmm0, %xmm0 cvtsi2ss %eax, %xmm0 movss .LCPI4_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero mulss %xmm1, %xmm0 movss %xmm0, (%rbx,%r15,4) callq rand xorps %xmm0, %xmm0 cvtsi2ss %eax, %xmm0 mulss .LCPI4_0(%rip), %xmm0 movss %xmm0, (%r14,%r15,4) incq %r15 cmpq $33554432, %r15 # imm = 0x2000000 jne .LBB4_1 # %bb.2: # %.preheader movl $33554432, %r15d # imm = 0x2000000 xorl %r13d, %r13d leaq 56(%rsp), %r12 jmp .LBB4_3 .p2align 4, 0x90 .LBB4_12: # in Loop: Header=BB4_3 Depth=1 addl %r15d, %r15d .LBB4_13: # in Loop: Header=BB4_3 Depth=1 movl %r15d, %eax shrl %eax movb $1, %r13b cmpl $65537, %r15d # imm = 0x10001 movl %eax, %r15d jbe .LBB4_14 .LBB4_3: # %.lr.ph.i # =>This Loop Header: Depth=1 # Child Loop BB4_4 Depth 2 leaq 72(%rsp), %rdi xorl %esi, %esi callq gettimeofday movss .LCPI4_3(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero movl %r15d, %eax movss (%rbx,%rax,4), %xmm2 # xmm2 = mem[0],zero,zero,zero movss (%r14,%rax,4), %xmm0 # xmm0 = mem[0],zero,zero,zero mulss %xmm2, %xmm0 xorps %xmm3, %xmm3 cvtss2sd %xmm0, %xmm3 divsd .LCPI4_1(%rip), %xmm3 xorps %xmm4, %xmm4 cvtss2sd %xmm2, %xmm4 andps .LCPI4_2(%rip), %xmm2 movl %r15d, %ebp xorps %xmm0, %xmm0 .p2align 4, 0x90 .LBB4_4: # Parent Loop BB4_3 Depth=1 # => This Inner Loop Header: Depth=2 ucomisd %xmm3, %xmm0 ja .LBB4_5 .LBB4_7: # %cdce.end # in Loop: Header=BB4_4 Depth=2 ucomiss %xmm1, %xmm2 jae .LBB4_8 # %bb.9: # %cdce.end33 # in Loop: Header=BB4_4 Depth=2 decl %ebp jne .LBB4_4 jmp .LBB4_10 .LBB4_5: # %cdce.call # in Loop: Header=BB4_4 Depth=2 ucomisd %xmm0, %xmm3 jae .LBB4_7 # %bb.6: # %call.sqrt # in Loop: Header=BB4_4 Depth=2 movapd %xmm3, %xmm0 movaps %xmm2, 16(%rsp) # 16-byte Spill movsd %xmm3, 8(%rsp) # 8-byte Spill movsd %xmm4, (%rsp) # 8-byte Spill callq sqrt movsd (%rsp), %xmm4 # 8-byte Reload # xmm4 = mem[0],zero movsd 8(%rsp), %xmm3 # 8-byte Reload # xmm3 = mem[0],zero movaps 16(%rsp), %xmm2 # 16-byte Reload movss 
.LCPI4_3(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero xorpd %xmm0, %xmm0 jmp .LBB4_7 .LBB4_8: # %cdce.call32 # in Loop: Header=BB4_4 Depth=2 movaps %xmm4, %xmm0 movaps %xmm2, 16(%rsp) # 16-byte Spill movsd %xmm3, 8(%rsp) # 8-byte Spill movsd %xmm4, (%rsp) # 8-byte Spill callq sin movsd (%rsp), %xmm4 # 8-byte Reload # xmm4 = mem[0],zero movsd 8(%rsp), %xmm3 # 8-byte Reload # xmm3 = mem[0],zero movaps 16(%rsp), %xmm2 # 16-byte Reload movss .LCPI4_3(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero xorps %xmm0, %xmm0 decl %ebp jne .LBB4_4 .p2align 4, 0x90 .LBB4_10: # %_Z11GoldenBrickPfS_S_i.exit # in Loop: Header=BB4_3 Depth=1 movq %r12, %rdi xorl %esi, %esi callq gettimeofday testb $1, %r13b je .LBB4_12 # %bb.11: # in Loop: Header=BB4_3 Depth=1 xorps %xmm1, %xmm1 cvtsi2sdq 64(%rsp), %xmm1 xorps %xmm0, %xmm0 cvtsi2sdq 56(%rsp), %xmm0 movsd .LCPI4_4(%rip), %xmm2 # xmm2 = mem[0],zero divsd %xmm2, %xmm1 addsd %xmm1, %xmm0 xorps %xmm1, %xmm1 cvtsi2sdq 80(%rsp), %xmm1 divsd %xmm2, %xmm1 xorps %xmm2, %xmm2 cvtsi2sdq 72(%rsp), %xmm2 addsd %xmm1, %xmm2 movl %r15d, %eax andl $2147450880, %eax # imm = 0x7FFF8000 xorps %xmm1, %xmm1 cvtsi2sd %eax, %xmm1 subsd %xmm2, %xmm0 movapd %xmm0, %xmm2 mulsd .LCPI4_5(%rip), %xmm2 divsd %xmm2, %xmm1 movl $.L.str.1, %edi movl %r15d, %esi movb $2, %al callq printf jmp .LBB4_13 .LBB4_14: movl $.Lstr.1, %edi callq puts@PLT movq 32(%rsp), %rdi callq hipFree movq 40(%rsp), %rdi callq hipFree movq 48(%rsp), %rdi callq hipFree movq %r14, %rdi callq free movq %rbx, %rdi callq free xorl %eax, %eax addq $88, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end4: .size main, .Lfunc_end4-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: 
subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB5_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB5_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z18multiplyNumbersGPUPfS_S_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end5: .size __hip_module_ctor, .Lfunc_end5-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB6_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB6_2: retq .Lfunc_end6: .size __hip_module_dtor, .Lfunc_end6-__hip_module_dtor .cfi_endproc # -- End function .type _Z18multiplyNumbersGPUPfS_S_,@object # @_Z18multiplyNumbersGPUPfS_S_ .section .rodata,"a",@progbits .globl _Z18multiplyNumbersGPUPfS_S_ .p2align 3, 0x0 _Z18multiplyNumbersGPUPfS_S_: .quad _Z33__device_stub__multiplyNumbersGPUPfS_S_ .size _Z18multiplyNumbersGPUPfS_S_, 8 .type .L.str.1,@object # @.str.1 .section .rodata.str1.1,"aMS",@progbits,1 .L.str.1: .asciz "Elements: %d - convolution time : %f msec - %f Multiplications/sec\n" .size .L.str.1, 68 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z18multiplyNumbersGPUPfS_S_" .size .L__unnamed_1, 29 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local 
__hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "Initializing data..." .size .Lstr, 21 .type .Lstr.1,@object # @str.1 .Lstr.1: .asciz "Cleaning up..." .size .Lstr.1, 15 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z33__device_stub__multiplyNumbersGPUPfS_S_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z18multiplyNumbersGPUPfS_S_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
#include <cstdlib>
#include <device_launch_parameters.h>

constexpr auto PI = 3.14f;

// Abort with file/line context when any CUDA runtime call fails.
// Kernel launches return no status directly, so launch errors are picked up
// via cudaGetLastError() wrapped in this same macro.
#define CUDA_CHECK(call)                                                    \
    do {                                                                    \
        cudaError_t err_ = (call);                                          \
        if (err_ != cudaSuccess) {                                          \
            std::cerr << "CUDA error " << __FILE__ << ":" << __LINE__       \
                      << ": " << cudaGetErrorString(err_) << std::endl;     \
            std::exit(EXIT_FAILURE);                                        \
        }                                                                   \
    } while (0)

// Fill two N x M (rows x cols, row-major) matrices with trigonometric data.
// Expects a 2D launch where thread (x, y) handles element (row = x, col = y).
// NOTE: the row-major stride is M (number of columns). The original code used
// N as the stride, which writes out of bounds whenever M < N; it only worked
// because main() passes N == M.
__global__ void fill_array2D(float *a, float *b, int N, int M)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    int col = blockIdx.y * blockDim.y + threadIdx.y;
    if (row < N && col < M) {
        // Any expression can be substituted on the right-hand side.
        a[row * M + col] = powf(sinf(2 * PI * row / N), 2) + powf(cosf(2 * PI * col / M), 2);
        b[row * M + col] = powf(cosf(2 * PI * row / N), 2) + powf(sinf(2 * PI * col / M), 2);
    }
}

// Same fill as fill_array2D, but driven by a 1D launch: the flat thread index
// is decoded into (row, col) using the column count M as the stride.
// The row term is normalized by N (row range) and the column term by M
// (column range), consistently for both outputs; the original divided the
// 'a' column term by N and the 'b' terms by M, which only agreed for N == M.
__global__ void fill_array1D(float *a, float *b, int N, int M)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int row = idx / M;
    int col = idx % M;
    if (row < N && col < M) {
        a[row * M + col] = powf(sinf(2 * PI * row / N), 2) + powf(cosf(2 * PI * col / M), 2);
        b[row * M + col] = powf(cosf(2 * PI * row / N), 2) + powf(sinf(2 * PI * col / M), 2);
    }
}

// Element-wise sum c = a + b over an N x M row-major matrix, 1D launch.
__global__ void sum_vectors1D(float *a, float *b, float *c, int N, int M)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int row = idx / M;
    int col = idx % M;
    if (row < N && col < M) {
        c[row * M + col] = a[row * M + col] + b[row * M + col];
    }
}

// Element-wise sum c = a + b over an N x M row-major matrix, 2D launch.
__global__ void sum_vectors2D(float *a, float *b, float *c, int N, int M)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    int col = blockIdx.y * blockDim.y + threadIdx.y;
    if (row < N && col < M) {
        c[row * M + col] = a[row * M + col] + b[row * M + col];
    }
}

int main()
{
    float *a_h, *a_d, *b_h, *b_d, *c_h, *c_d;
    const int N = 512;  // rows
    const int M = 512;  // columns
    size_t size = N * M * sizeof(float);

    // Host allocations.
    a_h = (float*)malloc(size);
    b_h = (float*)malloc(size);
    c_h = (float*)malloc(size);
    if (!a_h || !b_h || !c_h) {
        std::cerr << "host allocation failed" << std::endl;
        return EXIT_FAILURE;
    }

    // Device allocations.
    CUDA_CHECK(cudaMalloc((void**)&a_d, size));
    CUDA_CHECK(cudaMalloc((void**)&b_d, size));
    CUDA_CHECK(cudaMalloc((void**)&c_d, size));

    // 2D config: 16x16 blocks of 32x32 threads covers 512x512 exactly.
    dim3 grid2D(16, 16, 1);
    dim3 threads2D(32, 32, 1);
    // 1D config: 512 blocks of 512 threads = N*M threads, one per element.
    dim3 grid1D(512, 1, 1);
    dim3 threads1D(512, 1, 1);

    // Fill then sum with the 2D kernels, then overwrite the same buffers
    // with the 1D variants (the printed result comes from the 1D path).
    fill_array2D <<< grid2D, threads2D >>> (a_d, b_d, N, M);
    CUDA_CHECK(cudaGetLastError());
    sum_vectors2D <<< grid2D, threads2D >>> (a_d, b_d, c_d, N, M);
    CUDA_CHECK(cudaGetLastError());
    fill_array1D <<< grid1D, threads1D >>> (a_d, b_d, N, M);
    CUDA_CHECK(cudaGetLastError());
    sum_vectors1D <<< grid1D, threads1D >>> (a_d, b_d, c_d, N, M);
    CUDA_CHECK(cudaGetLastError());

    // Blocking copies; cudaMemcpy synchronizes with the preceding launches.
    CUDA_CHECK(cudaMemcpy(a_h, a_d, size, cudaMemcpyDeviceToHost));
    CUDA_CHECK(cudaMemcpy(b_h, b_d, size, cudaMemcpyDeviceToHost));
    CUDA_CHECK(cudaMemcpy(c_h, c_d, size, cudaMemcpyDeviceToHost));

    // Print the result matrix row by row (stride M, matching the kernels).
    for (int i = 0; i < N; ++i) {
        for (int j = 0; j < M; ++j) {
            std::cout << c_h[i * M + j] << " ";
        }
        std::cout << std::endl;
    }

    // Cleanup.
    free(a_h);
    free(b_h);
    free(c_h);
    CUDA_CHECK(cudaFree(a_d));
    CUDA_CHECK(cudaFree(b_d));
    CUDA_CHECK(cudaFree(c_d));
    return 0;
}
.file "tmpxft_001b9c69_00000000-6_CUDA_Suma_Populare.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB3672: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3672: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z36__device_stub__Z12fill_array2DPfS_iiPfS_ii .type _Z36__device_stub__Z12fill_array2DPfS_iiPfS_ii, @function _Z36__device_stub__Z12fill_array2DPfS_iiPfS_ii: .LFB3694: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z12fill_array2DPfS_ii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE3694: .size _Z36__device_stub__Z12fill_array2DPfS_iiPfS_ii, .-_Z36__device_stub__Z12fill_array2DPfS_iiPfS_ii .globl _Z12fill_array2DPfS_ii .type _Z12fill_array2DPfS_ii, @function _Z12fill_array2DPfS_ii: .LFB3695: .cfi_startproc endbr64 subq $8, 
%rsp .cfi_def_cfa_offset 16 call _Z36__device_stub__Z12fill_array2DPfS_iiPfS_ii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3695: .size _Z12fill_array2DPfS_ii, .-_Z12fill_array2DPfS_ii .globl _Z36__device_stub__Z12fill_array1DPfS_iiPfS_ii .type _Z36__device_stub__Z12fill_array1DPfS_iiPfS_ii, @function _Z36__device_stub__Z12fill_array1DPfS_iiPfS_ii: .LFB3696: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L15 .L11: movq 136(%rsp), %rax subq %fs:40, %rax jne .L16 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z12fill_array1DPfS_ii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L11 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE3696: .size _Z36__device_stub__Z12fill_array1DPfS_iiPfS_ii, .-_Z36__device_stub__Z12fill_array1DPfS_iiPfS_ii .globl _Z12fill_array1DPfS_ii .type _Z12fill_array1DPfS_ii, @function _Z12fill_array1DPfS_ii: .LFB3697: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z36__device_stub__Z12fill_array1DPfS_iiPfS_ii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3697: .size _Z12fill_array1DPfS_ii, .-_Z12fill_array1DPfS_ii .globl _Z39__device_stub__Z13sum_vectors1DPfS_S_iiPfS_S_ii .type 
_Z39__device_stub__Z13sum_vectors1DPfS_S_iiPfS_S_ii, @function _Z39__device_stub__Z13sum_vectors1DPfS_S_iiPfS_S_ii: .LFB3698: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movl %r8d, (%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movq %rsp, %rax movq %rax, 128(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L23 .L19: movq 136(%rsp), %rax subq %fs:40, %rax jne .L24 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L23: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z13sum_vectors1DPfS_S_ii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L19 .L24: call __stack_chk_fail@PLT .cfi_endproc .LFE3698: .size _Z39__device_stub__Z13sum_vectors1DPfS_S_iiPfS_S_ii, .-_Z39__device_stub__Z13sum_vectors1DPfS_S_iiPfS_S_ii .globl _Z13sum_vectors1DPfS_S_ii .type _Z13sum_vectors1DPfS_S_ii, @function _Z13sum_vectors1DPfS_S_ii: .LFB3699: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z39__device_stub__Z13sum_vectors1DPfS_S_iiPfS_S_ii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3699: .size _Z13sum_vectors1DPfS_S_ii, .-_Z13sum_vectors1DPfS_S_ii .globl _Z39__device_stub__Z13sum_vectors2DPfS_S_iiPfS_S_ii .type _Z39__device_stub__Z13sum_vectors2DPfS_S_iiPfS_S_ii, @function _Z39__device_stub__Z13sum_vectors2DPfS_S_iiPfS_S_ii: .LFB3700: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 
movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movl %r8d, (%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movq %rsp, %rax movq %rax, 128(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L31 .L27: movq 136(%rsp), %rax subq %fs:40, %rax jne .L32 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L31: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z13sum_vectors2DPfS_S_ii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L27 .L32: call __stack_chk_fail@PLT .cfi_endproc .LFE3700: .size _Z39__device_stub__Z13sum_vectors2DPfS_S_iiPfS_S_ii, .-_Z39__device_stub__Z13sum_vectors2DPfS_S_iiPfS_S_ii .globl _Z13sum_vectors2DPfS_S_ii .type _Z13sum_vectors2DPfS_S_ii, @function _Z13sum_vectors2DPfS_S_ii: .LFB3701: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z39__device_stub__Z13sum_vectors2DPfS_S_iiPfS_S_ii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3701: .size _Z13sum_vectors2DPfS_S_ii, .-_Z13sum_vectors2DPfS_S_ii .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string " " .text .globl main .type main, @function main: .LFB3669: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 
.cfi_offset 3, -56 subq $104, %rsp .cfi_def_cfa_offset 160 movq %fs:40, %rax movq %rax, 88(%rsp) xorl %eax, %eax movl $1048576, %edi call malloc@PLT movq %rax, (%rsp) movl $1048576, %edi call malloc@PLT movq %rax, 8(%rsp) movl $1048576, %edi call malloc@PLT movq %rax, %r15 leaq 16(%rsp), %rdi movl $1048576, %esi call cudaMalloc@PLT leaq 24(%rsp), %rdi movl $1048576, %esi call cudaMalloc@PLT leaq 32(%rsp), %rdi movl $1048576, %esi call cudaMalloc@PLT movl $16, 40(%rsp) movl $16, 44(%rsp) movl $1, 48(%rsp) movl $32, 52(%rsp) movl $32, 56(%rsp) movl $1, 60(%rsp) movl $512, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $512, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) movl $0, %r9d movl $0, %r8d movq 52(%rsp), %rdx movl $1, %ecx movq 40(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L50 .L36: movl 60(%rsp), %ecx movl $0, %r9d movl $0, %r8d movq 52(%rsp), %rdx movq 40(%rsp), %rdi movl 48(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L51 .L37: movl 84(%rsp), %ecx movl $0, %r9d movl $0, %r8d movq 76(%rsp), %rdx movq 64(%rsp), %rdi movl 72(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L52 .L38: movl 84(%rsp), %ecx movl $0, %r9d movl $0, %r8d movq 76(%rsp), %rdx movq 64(%rsp), %rdi movl 72(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L53 .L39: movl $2, %ecx movl $1048576, %edx movq 16(%rsp), %rsi movq (%rsp), %rdi call cudaMemcpy@PLT movl $2, %ecx movl $1048576, %edx movq 24(%rsp), %rsi movq 8(%rsp), %rdi call cudaMemcpy@PLT movl $2, %ecx movl $1048576, %edx movq 32(%rsp), %rsi movq %r15, %rdi call cudaMemcpy@PLT leaq 2048(%r15), %rbp leaq 1050624(%r15), %r14 leaq _ZSt4cout(%rip), %r12 leaq .LC0(%rip), %r13 jmp .L40 .L50: movl $512, %ecx movl $512, %edx movq 24(%rsp), %rsi movq 16(%rsp), %rdi call _Z36__device_stub__Z12fill_array2DPfS_iiPfS_ii jmp .L36 .L51: movl $512, %r8d movl $512, %ecx movq 32(%rsp), %rdx movq 24(%rsp), %rsi movq 16(%rsp), %rdi call 
_Z39__device_stub__Z13sum_vectors2DPfS_S_iiPfS_S_ii jmp .L37 .L52: movl $512, %ecx movl $512, %edx movq 24(%rsp), %rsi movq 16(%rsp), %rdi call _Z36__device_stub__Z12fill_array1DPfS_iiPfS_ii jmp .L38 .L53: movl $512, %r8d movl $512, %ecx movq 32(%rsp), %rdx movq 24(%rsp), %rsi movq 16(%rsp), %rdi call _Z39__device_stub__Z13sum_vectors1DPfS_S_iiPfS_S_ii jmp .L39 .L55: movq 88(%rsp), %rax subq %fs:40, %rax jne .L54 call _ZSt16__throw_bad_castv@PLT .L54: call __stack_chk_fail@PLT .L56: movzbl 67(%rbx), %eax .L45: movsbl %al, %esi movq %r12, %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT addq $2048, %rbp cmpq %r14, %rbp je .L46 .L40: leaq -2048(%rbp), %rbx .L41: pxor %xmm0, %xmm0 cvtss2sd (%rbx), %xmm0 movq %r12, %rdi call _ZNSo9_M_insertIdEERSoT_@PLT movq %rax, %rdi movl $1, %edx movq %r13, %rsi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT addq $4, %rbx cmpq %rbx, %rbp jne .L41 movq (%r12), %rax movq -24(%rax), %rax movq 240(%r12,%rax), %rbx testq %rbx, %rbx je .L55 cmpb $0, 56(%rbx) jne .L56 movq %rbx, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq (%rbx), %rax movl $10, %esi movq %rbx, %rdi call *48(%rax) jmp .L45 .L46: movq (%rsp), %rdi call free@PLT movq 8(%rsp), %rdi call free@PLT movq %r15, %rdi call free@PLT movq 16(%rsp), %rdi call cudaFree@PLT movq 24(%rsp), %rdi call cudaFree@PLT movq 32(%rsp), %rdi call cudaFree@PLT movq 88(%rsp), %rax subq %fs:40, %rax jne .L57 movl $0, %eax addq $104, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L57: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE3669: .size main, .-main .section .rodata.str1.1 .LC1: .string "_Z13sum_vectors2DPfS_S_ii" .LC2: .string "_Z13sum_vectors1DPfS_S_ii" .LC3: .string "_Z12fill_array1DPfS_ii" .LC4: .string 
"_Z12fill_array2DPfS_ii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB3703: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _Z13sum_vectors2DPfS_S_ii(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC2(%rip), %rdx movq %rdx, %rcx leaq _Z13sum_vectors1DPfS_S_ii(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC3(%rip), %rdx movq %rdx, %rcx leaq _Z12fill_array1DPfS_ii(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC4(%rip), %rdx movq %rdx, %rcx leaq _Z12fill_array2DPfS_ii(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3703: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" 
.align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <cuda.h> #include <cuda_runtime.h> #include <iostream> #include <device_launch_parameters.h> constexpr auto PI = 3.14f; //umplerea a doua matrici cu date __global__ void fill_array2D(float *a, float *b, int N, int M) { int row = blockIdx.x * blockDim.x + threadIdx.x; int col = blockIdx.y * blockDim.y + threadIdx.y; if (row < N && col < M) { a[row*N + col] = powf(sinf(2 * PI * row / N), 2) + powf(cosf(2 * PI * col / M), 2); //dupa egal se poate pune orice conditie avem nevoie b[row*N + col] = powf(cosf(2 * PI * row / N), 2) + powf(sinf(2 * PI * col / M), 2); //dupa egal se poate pune orice conditie avem nevoie } } //umplerea a doi vectori cu date __global__ void fill_array1D(float *a,float*b, int N, int M) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int row = idx / N; int col = idx % N; if (row < N && col < M) { a[row*N + col] = powf(sinf(2 * PI*row/N), 2) + powf(cosf(2 * PI*col/N), 2); b[row*N + col] = powf(cosf(2 * PI*row/M), 2) + powf(sinf(2 * PI*col/M), 2); } } //suma a doi vectori, intr-un vector c __global__ void sum_vectors1D(float *a, float *b, float *c, int N, int M) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int row = idx/N; int col = idx%N; if (row < N && col < M) { c[row*N + col] = a[row*N + col] + b[row*N + col]; } } //suma a doua matrici, intr-o matrice c __global__ void sum_vectors2D(float *a, float *b, float *c, int N,int M) { int row = blockIdx.x * blockDim.x + threadIdx.x; int col = blockIdx.y * blockDim.y + threadIdx.y; if (row < N && col < M) { c[row*N + col] = a[row*N + col]+ b[row*N + col]; } } int main() { float *a_h, *a_d, *b_h, *b_d, *c_h, *c_d; const int N = 512; const int M = 512; size_t size = N * M * sizeof(float); //alocare host a_h = (float*)malloc(size); b_h = (float*)malloc(size); c_h = (float*)malloc(size); //alocare device cudaMalloc((void**)&a_d, size); cudaMalloc((void**)&b_d, size); cudaMalloc((void**)&c_d, size); //dimensiuni grid si threads dim3 grid2D(16,16,1); dim3 threads2D(32,32,1); dim3 grid1D(512, 
1, 1); dim3 threads1D(512, 1, 1); //fill arrays fill_array2D <<< grid2D, threads2D >>> (a_d, b_d,N, M); sum_vectors2D <<< grid2D, threads2D >>> (a_d, b_d, c_d, N, M); fill_array1D <<< grid1D, threads1D >>> (a_d, b_d, N, M); sum_vectors1D <<< grid1D, threads1D >>> (a_d, b_d, c_d, N, M); //copy device data to host cudaMemcpy(a_h, a_d, size, cudaMemcpyDeviceToHost); cudaMemcpy(b_h, b_d, size, cudaMemcpyDeviceToHost); cudaMemcpy(c_h, c_d, size, cudaMemcpyDeviceToHost); for (int i = 0; i < N; ++i) { for (int j = 0; j < M; ++j) { std::cout << c_h[i*N + j]<<" "; } std::cout << std::endl; } //cuda cleanup free(a_h); free(b_h); free(c_h); cudaFree(a_d); cudaFree(b_d); cudaFree(c_d); return 0; }
#include <hip/hip_runtime.h> #include <iostream> constexpr auto PI = 3.14f; //umplerea a doua matrici cu date __global__ void fill_array2D(float *a, float *b, int N, int M) { int row = blockIdx.x * blockDim.x + threadIdx.x; int col = blockIdx.y * blockDim.y + threadIdx.y; if (row < N && col < M) { a[row*N + col] = powf(sinf(2 * PI * row / N), 2) + powf(cosf(2 * PI * col / M), 2); //dupa egal se poate pune orice conditie avem nevoie b[row*N + col] = powf(cosf(2 * PI * row / N), 2) + powf(sinf(2 * PI * col / M), 2); //dupa egal se poate pune orice conditie avem nevoie } } //umplerea a doi vectori cu date __global__ void fill_array1D(float *a,float*b, int N, int M) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int row = idx / N; int col = idx % N; if (row < N && col < M) { a[row*N + col] = powf(sinf(2 * PI*row/N), 2) + powf(cosf(2 * PI*col/N), 2); b[row*N + col] = powf(cosf(2 * PI*row/M), 2) + powf(sinf(2 * PI*col/M), 2); } } //suma a doi vectori, intr-un vector c __global__ void sum_vectors1D(float *a, float *b, float *c, int N, int M) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int row = idx/N; int col = idx%N; if (row < N && col < M) { c[row*N + col] = a[row*N + col] + b[row*N + col]; } } //suma a doua matrici, intr-o matrice c __global__ void sum_vectors2D(float *a, float *b, float *c, int N,int M) { int row = blockIdx.x * blockDim.x + threadIdx.x; int col = blockIdx.y * blockDim.y + threadIdx.y; if (row < N && col < M) { c[row*N + col] = a[row*N + col]+ b[row*N + col]; } } int main() { float *a_h, *a_d, *b_h, *b_d, *c_h, *c_d; const int N = 512; const int M = 512; size_t size = N * M * sizeof(float); //alocare host a_h = (float*)malloc(size); b_h = (float*)malloc(size); c_h = (float*)malloc(size); //alocare device hipMalloc((void**)&a_d, size); hipMalloc((void**)&b_d, size); hipMalloc((void**)&c_d, size); //dimensiuni grid si threads dim3 grid2D(16,16,1); dim3 threads2D(32,32,1); dim3 grid1D(512, 1, 1); dim3 threads1D(512, 1, 1); //fill arrays 
fill_array2D <<< grid2D, threads2D >>> (a_d, b_d,N, M); sum_vectors2D <<< grid2D, threads2D >>> (a_d, b_d, c_d, N, M); fill_array1D <<< grid1D, threads1D >>> (a_d, b_d, N, M); sum_vectors1D <<< grid1D, threads1D >>> (a_d, b_d, c_d, N, M); //copy device data to host hipMemcpy(a_h, a_d, size, hipMemcpyDeviceToHost); hipMemcpy(b_h, b_d, size, hipMemcpyDeviceToHost); hipMemcpy(c_h, c_d, size, hipMemcpyDeviceToHost); for (int i = 0; i < N; ++i) { for (int j = 0; j < M; ++j) { std::cout << c_h[i*N + j]<<" "; } std::cout << std::endl; } //cuda cleanup free(a_h); free(b_h); free(c_h); hipFree(a_d); hipFree(b_d); hipFree(c_d); return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include <iostream> constexpr auto PI = 3.14f; //umplerea a doua matrici cu date __global__ void fill_array2D(float *a, float *b, int N, int M) { int row = blockIdx.x * blockDim.x + threadIdx.x; int col = blockIdx.y * blockDim.y + threadIdx.y; if (row < N && col < M) { a[row*N + col] = powf(sinf(2 * PI * row / N), 2) + powf(cosf(2 * PI * col / M), 2); //dupa egal se poate pune orice conditie avem nevoie b[row*N + col] = powf(cosf(2 * PI * row / N), 2) + powf(sinf(2 * PI * col / M), 2); //dupa egal se poate pune orice conditie avem nevoie } } //umplerea a doi vectori cu date __global__ void fill_array1D(float *a,float*b, int N, int M) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int row = idx / N; int col = idx % N; if (row < N && col < M) { a[row*N + col] = powf(sinf(2 * PI*row/N), 2) + powf(cosf(2 * PI*col/N), 2); b[row*N + col] = powf(cosf(2 * PI*row/M), 2) + powf(sinf(2 * PI*col/M), 2); } } //suma a doi vectori, intr-un vector c __global__ void sum_vectors1D(float *a, float *b, float *c, int N, int M) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int row = idx/N; int col = idx%N; if (row < N && col < M) { c[row*N + col] = a[row*N + col] + b[row*N + col]; } } //suma a doua matrici, intr-o matrice c __global__ void sum_vectors2D(float *a, float *b, float *c, int N,int M) { int row = blockIdx.x * blockDim.x + threadIdx.x; int col = blockIdx.y * blockDim.y + threadIdx.y; if (row < N && col < M) { c[row*N + col] = a[row*N + col]+ b[row*N + col]; } } int main() { float *a_h, *a_d, *b_h, *b_d, *c_h, *c_d; const int N = 512; const int M = 512; size_t size = N * M * sizeof(float); //alocare host a_h = (float*)malloc(size); b_h = (float*)malloc(size); c_h = (float*)malloc(size); //alocare device hipMalloc((void**)&a_d, size); hipMalloc((void**)&b_d, size); hipMalloc((void**)&c_d, size); //dimensiuni grid si threads dim3 grid2D(16,16,1); dim3 threads2D(32,32,1); dim3 grid1D(512, 1, 1); dim3 threads1D(512, 1, 1); //fill arrays 
fill_array2D <<< grid2D, threads2D >>> (a_d, b_d,N, M); sum_vectors2D <<< grid2D, threads2D >>> (a_d, b_d, c_d, N, M); fill_array1D <<< grid1D, threads1D >>> (a_d, b_d, N, M); sum_vectors1D <<< grid1D, threads1D >>> (a_d, b_d, c_d, N, M); //copy device data to host hipMemcpy(a_h, a_d, size, hipMemcpyDeviceToHost); hipMemcpy(b_h, b_d, size, hipMemcpyDeviceToHost); hipMemcpy(c_h, c_d, size, hipMemcpyDeviceToHost); for (int i = 0; i < N; ++i) { for (int j = 0; j < M; ++j) { std::cout << c_h[i*N + j]<<" "; } std::cout << std::endl; } //cuda cleanup free(a_h); free(b_h); free(c_h); hipFree(a_d); hipFree(b_d); hipFree(c_d); return 0; }
.text .file "CUDA_Suma_Populare.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .globl _Z27__device_stub__fill_array2DPfS_ii # -- Begin function _Z27__device_stub__fill_array2DPfS_ii .p2align 4, 0x90 .type _Z27__device_stub__fill_array2DPfS_ii,@function _Z27__device_stub__fill_array2DPfS_ii: # @_Z27__device_stub__fill_array2DPfS_ii .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 8(%rsp), %rax movq %rax, 104(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z12fill_array2DPfS_ii, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z27__device_stub__fill_array2DPfS_ii, .Lfunc_end0-_Z27__device_stub__fill_array2DPfS_ii .cfi_endproc # -- End function .globl _Z27__device_stub__fill_array1DPfS_ii # -- Begin function _Z27__device_stub__fill_array1DPfS_ii .p2align 4, 0x90 .type _Z27__device_stub__fill_array1DPfS_ii,@function _Z27__device_stub__fill_array1DPfS_ii: # @_Z27__device_stub__fill_array1DPfS_ii .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 8(%rsp), %rax movq %rax, 104(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 
80(%rsp), %r9 movl $_Z12fill_array1DPfS_ii, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end1: .size _Z27__device_stub__fill_array1DPfS_ii, .Lfunc_end1-_Z27__device_stub__fill_array1DPfS_ii .cfi_endproc # -- End function .globl _Z28__device_stub__sum_vectors1DPfS_S_ii # -- Begin function _Z28__device_stub__sum_vectors1DPfS_S_ii .p2align 4, 0x90 .type _Z28__device_stub__sum_vectors1DPfS_S_ii,@function _Z28__device_stub__sum_vectors1DPfS_S_ii: # @_Z28__device_stub__sum_vectors1DPfS_S_ii .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) movl %r8d, (%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) movq %rsp, %rax movq %rax, 112(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z13sum_vectors1DPfS_S_ii, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end2: .size _Z28__device_stub__sum_vectors1DPfS_S_ii, .Lfunc_end2-_Z28__device_stub__sum_vectors1DPfS_S_ii .cfi_endproc # -- End function .globl _Z28__device_stub__sum_vectors2DPfS_S_ii # -- Begin function _Z28__device_stub__sum_vectors2DPfS_S_ii .p2align 4, 0x90 .type _Z28__device_stub__sum_vectors2DPfS_S_ii,@function _Z28__device_stub__sum_vectors2DPfS_S_ii: # @_Z28__device_stub__sum_vectors2DPfS_S_ii .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) movl %r8d, (%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax 
movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) movq %rsp, %rax movq %rax, 112(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z13sum_vectors2DPfS_S_ii, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end3: .size _Z28__device_stub__sum_vectors2DPfS_S_ii, .Lfunc_end3-_Z28__device_stub__sum_vectors2DPfS_S_ii .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $152, %rsp .cfi_def_cfa_offset 208 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movabsq $137438953504, %r12 # imm = 0x2000000020 movabsq $68719476752, %r13 # imm = 0x1000000010 movl $1048576, %edi # imm = 0x100000 callq malloc movq %rax, %rbx movl $1048576, %edi # imm = 0x100000 callq malloc movq %rax, %r14 movl $1048576, %edi # imm = 0x100000 callq malloc movq %rax, %r15 leaq 96(%rsp), %rdi movl $1048576, %esi # imm = 0x100000 callq hipMalloc leaq 88(%rsp), %rdi movl $1048576, %esi # imm = 0x100000 callq hipMalloc leaq 104(%rsp), %rdi movl $1048576, %esi # imm = 0x100000 callq hipMalloc movq %r13, %rdi movl $1, %esi movq %r12, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB4_2 # %bb.1: movq 96(%rsp), %rax movq 88(%rsp), %rcx movq %rax, 72(%rsp) movq %rcx, 64(%rsp) movl $512, 8(%rsp) # imm = 0x200 movl $512, 4(%rsp) # imm = 0x200 leaq 
72(%rsp), %rax movq %rax, 112(%rsp) leaq 64(%rsp), %rax movq %rax, 120(%rsp) leaq 8(%rsp), %rax movq %rax, 128(%rsp) leaq 4(%rsp), %rax movq %rax, 136(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 56(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 112(%rsp), %r9 movl $_Z12fill_array2DPfS_ii, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 64(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB4_2: movq %r13, %rdi movl $1, %esi movq %r12, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB4_4 # %bb.3: movq 96(%rsp), %rax movq 88(%rsp), %rcx movq 104(%rsp), %rdx movq %rax, 72(%rsp) movq %rcx, 64(%rsp) movq %rdx, 56(%rsp) movl $512, 4(%rsp) # imm = 0x200 movl $512, 84(%rsp) # imm = 0x200 leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 64(%rsp), %rax movq %rax, 120(%rsp) leaq 56(%rsp), %rax movq %rax, 128(%rsp) leaq 4(%rsp), %rax movq %rax, 136(%rsp) leaq 84(%rsp), %rax movq %rax, 144(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 112(%rsp), %r9 movl $_Z13sum_vectors2DPfS_S_ii, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB4_4: movabsq $4294967808, %r12 # imm = 0x100000200 movq %r12, %rdi movl $1, %esi movq %r12, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB4_6 # %bb.5: movq 96(%rsp), %rax movq 88(%rsp), %rcx movq %rax, 72(%rsp) movq %rcx, 64(%rsp) movl $512, 8(%rsp) # imm = 0x200 movl $512, 4(%rsp) # imm = 0x200 leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 64(%rsp), %rax movq %rax, 120(%rsp) leaq 8(%rsp), %rax movq %rax, 128(%rsp) leaq 
4(%rsp), %rax movq %rax, 136(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 56(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 112(%rsp), %r9 movl $_Z12fill_array1DPfS_ii, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 64(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB4_6: movq %r12, %rdi movl $1, %esi movq %r12, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB4_8 # %bb.7: movq 96(%rsp), %rax movq 88(%rsp), %rcx movq 104(%rsp), %rdx movq %rax, 72(%rsp) movq %rcx, 64(%rsp) movq %rdx, 56(%rsp) movl $512, 4(%rsp) # imm = 0x200 movl $512, 84(%rsp) # imm = 0x200 leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 64(%rsp), %rax movq %rax, 120(%rsp) leaq 56(%rsp), %rax movq %rax, 128(%rsp) leaq 4(%rsp), %rax movq %rax, 136(%rsp) leaq 84(%rsp), %rax movq %rax, 144(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 112(%rsp), %r9 movl $_Z13sum_vectors1DPfS_S_ii, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB4_8: movq 96(%rsp), %rsi movl $1048576, %edx # imm = 0x100000 movq %rbx, %rdi movl $2, %ecx callq hipMemcpy movq 88(%rsp), %rsi movl $1048576, %edx # imm = 0x100000 movq %r14, %rdi movl $2, %ecx callq hipMemcpy movq 104(%rsp), %rsi movl $1048576, %edx # imm = 0x100000 movq %r15, %rdi movl $2, %ecx callq hipMemcpy xorl %r13d, %r13d movq %r15, %rbp jmp .LBB4_9 .p2align 4, 0x90 .LBB4_14: # in Loop: Header=BB4_9 Depth=1 movq %r12, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r12), %rax movq %r12, %rdi movl $10, %esi callq *48(%rax) .LBB4_15: # 
%_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit # in Loop: Header=BB4_9 Depth=1 movsbl %al, %esi movl $_ZSt4cout, %edi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv incq %r13 addq $2048, %rbp # imm = 0x800 cmpq $512, %r13 # imm = 0x200 je .LBB4_16 .LBB4_9: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB4_10 Depth 2 xorl %r12d, %r12d .p2align 4, 0x90 .LBB4_10: # Parent Loop BB4_9 Depth=1 # => This Inner Loop Header: Depth=2 movss (%rbp,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $_ZSt4cout, %edi callq _ZNSo9_M_insertIdEERSoT_ movl $.L.str, %esi movl $1, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l incq %r12 cmpq $512, %r12 # imm = 0x200 jne .LBB4_10 # %bb.11: # in Loop: Header=BB4_9 Depth=1 movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq _ZSt4cout+240(%rax), %r12 testq %r12, %r12 je .LBB4_17 # %bb.12: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i # in Loop: Header=BB4_9 Depth=1 cmpb $0, 56(%r12) je .LBB4_14 # %bb.13: # in Loop: Header=BB4_9 Depth=1 movzbl 67(%r12), %eax jmp .LBB4_15 .LBB4_16: movq %rbx, %rdi callq free movq %r14, %rdi callq free movq %r15, %rdi callq free movq 96(%rsp), %rdi callq hipFree movq 88(%rsp), %rdi callq hipFree movq 104(%rsp), %rdi callq hipFree xorl %eax, %eax addq $152, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .LBB4_17: .cfi_def_cfa_offset 208 callq _ZSt16__throw_bad_castv .Lfunc_end4: .size main, .Lfunc_end4-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, 
__hip_gpubin_handle(%rip) jne .LBB5_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB5_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z12fill_array2DPfS_ii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z12fill_array1DPfS_ii, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z13sum_vectors1DPfS_S_ii, %esi movl $.L__unnamed_3, %edx movl $.L__unnamed_3, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z13sum_vectors2DPfS_S_ii, %esi movl $.L__unnamed_4, %edx movl $.L__unnamed_4, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end5: .size __hip_module_ctor, .Lfunc_end5-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB6_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB6_2: retq .Lfunc_end6: .size __hip_module_dtor, .Lfunc_end6-__hip_module_dtor .cfi_endproc # -- End function .type _Z12fill_array2DPfS_ii,@object # @_Z12fill_array2DPfS_ii .section .rodata,"a",@progbits .globl _Z12fill_array2DPfS_ii .p2align 3, 0x0 _Z12fill_array2DPfS_ii: .quad 
_Z27__device_stub__fill_array2DPfS_ii .size _Z12fill_array2DPfS_ii, 8 .type _Z12fill_array1DPfS_ii,@object # @_Z12fill_array1DPfS_ii .globl _Z12fill_array1DPfS_ii .p2align 3, 0x0 _Z12fill_array1DPfS_ii: .quad _Z27__device_stub__fill_array1DPfS_ii .size _Z12fill_array1DPfS_ii, 8 .type _Z13sum_vectors1DPfS_S_ii,@object # @_Z13sum_vectors1DPfS_S_ii .globl _Z13sum_vectors1DPfS_S_ii .p2align 3, 0x0 _Z13sum_vectors1DPfS_S_ii: .quad _Z28__device_stub__sum_vectors1DPfS_S_ii .size _Z13sum_vectors1DPfS_S_ii, 8 .type _Z13sum_vectors2DPfS_S_ii,@object # @_Z13sum_vectors2DPfS_S_ii .globl _Z13sum_vectors2DPfS_S_ii .p2align 3, 0x0 _Z13sum_vectors2DPfS_S_ii: .quad _Z28__device_stub__sum_vectors2DPfS_S_ii .size _Z13sum_vectors2DPfS_S_ii, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz " " .size .L.str, 2 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z12fill_array2DPfS_ii" .size .L__unnamed_1, 23 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_Z12fill_array1DPfS_ii" .size .L__unnamed_2, 23 .type .L__unnamed_3,@object # @2 .L__unnamed_3: .asciz "_Z13sum_vectors1DPfS_S_ii" .size .L__unnamed_3, 26 .type .L__unnamed_4,@object # @3 .L__unnamed_4: .asciz "_Z13sum_vectors2DPfS_S_ii" .size .L__unnamed_4, 26 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 
e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z27__device_stub__fill_array2DPfS_ii .addrsig_sym _Z27__device_stub__fill_array1DPfS_ii .addrsig_sym _Z28__device_stub__sum_vectors1DPfS_S_ii .addrsig_sym _Z28__device_stub__sum_vectors2DPfS_S_ii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z12fill_array2DPfS_ii .addrsig_sym _Z12fill_array1DPfS_ii .addrsig_sym _Z13sum_vectors1DPfS_S_ii .addrsig_sym _Z13sum_vectors2DPfS_S_ii .addrsig_sym _ZSt4cout .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_001b9c69_00000000-6_CUDA_Suma_Populare.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB3672: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3672: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z36__device_stub__Z12fill_array2DPfS_iiPfS_ii .type _Z36__device_stub__Z12fill_array2DPfS_iiPfS_ii, @function _Z36__device_stub__Z12fill_array2DPfS_iiPfS_ii: .LFB3694: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z12fill_array2DPfS_ii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE3694: .size _Z36__device_stub__Z12fill_array2DPfS_iiPfS_ii, .-_Z36__device_stub__Z12fill_array2DPfS_iiPfS_ii .globl _Z12fill_array2DPfS_ii .type _Z12fill_array2DPfS_ii, @function _Z12fill_array2DPfS_ii: .LFB3695: .cfi_startproc endbr64 subq $8, 
%rsp .cfi_def_cfa_offset 16 call _Z36__device_stub__Z12fill_array2DPfS_iiPfS_ii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3695: .size _Z12fill_array2DPfS_ii, .-_Z12fill_array2DPfS_ii .globl _Z36__device_stub__Z12fill_array1DPfS_iiPfS_ii .type _Z36__device_stub__Z12fill_array1DPfS_iiPfS_ii, @function _Z36__device_stub__Z12fill_array1DPfS_iiPfS_ii: .LFB3696: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L15 .L11: movq 136(%rsp), %rax subq %fs:40, %rax jne .L16 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z12fill_array1DPfS_ii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L11 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE3696: .size _Z36__device_stub__Z12fill_array1DPfS_iiPfS_ii, .-_Z36__device_stub__Z12fill_array1DPfS_iiPfS_ii .globl _Z12fill_array1DPfS_ii .type _Z12fill_array1DPfS_ii, @function _Z12fill_array1DPfS_ii: .LFB3697: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z36__device_stub__Z12fill_array1DPfS_iiPfS_ii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3697: .size _Z12fill_array1DPfS_ii, .-_Z12fill_array1DPfS_ii .globl _Z39__device_stub__Z13sum_vectors1DPfS_S_iiPfS_S_ii .type 
_Z39__device_stub__Z13sum_vectors1DPfS_S_iiPfS_S_ii, @function _Z39__device_stub__Z13sum_vectors1DPfS_S_iiPfS_S_ii: .LFB3698: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movl %r8d, (%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movq %rsp, %rax movq %rax, 128(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L23 .L19: movq 136(%rsp), %rax subq %fs:40, %rax jne .L24 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L23: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z13sum_vectors1DPfS_S_ii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L19 .L24: call __stack_chk_fail@PLT .cfi_endproc .LFE3698: .size _Z39__device_stub__Z13sum_vectors1DPfS_S_iiPfS_S_ii, .-_Z39__device_stub__Z13sum_vectors1DPfS_S_iiPfS_S_ii .globl _Z13sum_vectors1DPfS_S_ii .type _Z13sum_vectors1DPfS_S_ii, @function _Z13sum_vectors1DPfS_S_ii: .LFB3699: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z39__device_stub__Z13sum_vectors1DPfS_S_iiPfS_S_ii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3699: .size _Z13sum_vectors1DPfS_S_ii, .-_Z13sum_vectors1DPfS_S_ii .globl _Z39__device_stub__Z13sum_vectors2DPfS_S_iiPfS_S_ii .type _Z39__device_stub__Z13sum_vectors2DPfS_S_iiPfS_S_ii, @function _Z39__device_stub__Z13sum_vectors2DPfS_S_iiPfS_S_ii: .LFB3700: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 
movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movl %r8d, (%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movq %rsp, %rax movq %rax, 128(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L31 .L27: movq 136(%rsp), %rax subq %fs:40, %rax jne .L32 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L31: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z13sum_vectors2DPfS_S_ii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L27 .L32: call __stack_chk_fail@PLT .cfi_endproc .LFE3700: .size _Z39__device_stub__Z13sum_vectors2DPfS_S_iiPfS_S_ii, .-_Z39__device_stub__Z13sum_vectors2DPfS_S_iiPfS_S_ii .globl _Z13sum_vectors2DPfS_S_ii .type _Z13sum_vectors2DPfS_S_ii, @function _Z13sum_vectors2DPfS_S_ii: .LFB3701: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z39__device_stub__Z13sum_vectors2DPfS_S_iiPfS_S_ii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3701: .size _Z13sum_vectors2DPfS_S_ii, .-_Z13sum_vectors2DPfS_S_ii .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string " " .text .globl main .type main, @function main: .LFB3669: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 
.cfi_offset 3, -56 subq $104, %rsp .cfi_def_cfa_offset 160 movq %fs:40, %rax movq %rax, 88(%rsp) xorl %eax, %eax movl $1048576, %edi call malloc@PLT movq %rax, (%rsp) movl $1048576, %edi call malloc@PLT movq %rax, 8(%rsp) movl $1048576, %edi call malloc@PLT movq %rax, %r15 leaq 16(%rsp), %rdi movl $1048576, %esi call cudaMalloc@PLT leaq 24(%rsp), %rdi movl $1048576, %esi call cudaMalloc@PLT leaq 32(%rsp), %rdi movl $1048576, %esi call cudaMalloc@PLT movl $16, 40(%rsp) movl $16, 44(%rsp) movl $1, 48(%rsp) movl $32, 52(%rsp) movl $32, 56(%rsp) movl $1, 60(%rsp) movl $512, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $512, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) movl $0, %r9d movl $0, %r8d movq 52(%rsp), %rdx movl $1, %ecx movq 40(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L50 .L36: movl 60(%rsp), %ecx movl $0, %r9d movl $0, %r8d movq 52(%rsp), %rdx movq 40(%rsp), %rdi movl 48(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L51 .L37: movl 84(%rsp), %ecx movl $0, %r9d movl $0, %r8d movq 76(%rsp), %rdx movq 64(%rsp), %rdi movl 72(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L52 .L38: movl 84(%rsp), %ecx movl $0, %r9d movl $0, %r8d movq 76(%rsp), %rdx movq 64(%rsp), %rdi movl 72(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L53 .L39: movl $2, %ecx movl $1048576, %edx movq 16(%rsp), %rsi movq (%rsp), %rdi call cudaMemcpy@PLT movl $2, %ecx movl $1048576, %edx movq 24(%rsp), %rsi movq 8(%rsp), %rdi call cudaMemcpy@PLT movl $2, %ecx movl $1048576, %edx movq 32(%rsp), %rsi movq %r15, %rdi call cudaMemcpy@PLT leaq 2048(%r15), %rbp leaq 1050624(%r15), %r14 leaq _ZSt4cout(%rip), %r12 leaq .LC0(%rip), %r13 jmp .L40 .L50: movl $512, %ecx movl $512, %edx movq 24(%rsp), %rsi movq 16(%rsp), %rdi call _Z36__device_stub__Z12fill_array2DPfS_iiPfS_ii jmp .L36 .L51: movl $512, %r8d movl $512, %ecx movq 32(%rsp), %rdx movq 24(%rsp), %rsi movq 16(%rsp), %rdi call 
_Z39__device_stub__Z13sum_vectors2DPfS_S_iiPfS_S_ii jmp .L37 .L52: movl $512, %ecx movl $512, %edx movq 24(%rsp), %rsi movq 16(%rsp), %rdi call _Z36__device_stub__Z12fill_array1DPfS_iiPfS_ii jmp .L38 .L53: movl $512, %r8d movl $512, %ecx movq 32(%rsp), %rdx movq 24(%rsp), %rsi movq 16(%rsp), %rdi call _Z39__device_stub__Z13sum_vectors1DPfS_S_iiPfS_S_ii jmp .L39 .L55: movq 88(%rsp), %rax subq %fs:40, %rax jne .L54 call _ZSt16__throw_bad_castv@PLT .L54: call __stack_chk_fail@PLT .L56: movzbl 67(%rbx), %eax .L45: movsbl %al, %esi movq %r12, %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT addq $2048, %rbp cmpq %r14, %rbp je .L46 .L40: leaq -2048(%rbp), %rbx .L41: pxor %xmm0, %xmm0 cvtss2sd (%rbx), %xmm0 movq %r12, %rdi call _ZNSo9_M_insertIdEERSoT_@PLT movq %rax, %rdi movl $1, %edx movq %r13, %rsi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT addq $4, %rbx cmpq %rbx, %rbp jne .L41 movq (%r12), %rax movq -24(%rax), %rax movq 240(%r12,%rax), %rbx testq %rbx, %rbx je .L55 cmpb $0, 56(%rbx) jne .L56 movq %rbx, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq (%rbx), %rax movl $10, %esi movq %rbx, %rdi call *48(%rax) jmp .L45 .L46: movq (%rsp), %rdi call free@PLT movq 8(%rsp), %rdi call free@PLT movq %r15, %rdi call free@PLT movq 16(%rsp), %rdi call cudaFree@PLT movq 24(%rsp), %rdi call cudaFree@PLT movq 32(%rsp), %rdi call cudaFree@PLT movq 88(%rsp), %rax subq %fs:40, %rax jne .L57 movl $0, %eax addq $104, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L57: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE3669: .size main, .-main .section .rodata.str1.1 .LC1: .string "_Z13sum_vectors2DPfS_S_ii" .LC2: .string "_Z13sum_vectors1DPfS_S_ii" .LC3: .string "_Z12fill_array1DPfS_ii" .LC4: .string 
"_Z12fill_array2DPfS_ii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB3703: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _Z13sum_vectors2DPfS_S_ii(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC2(%rip), %rdx movq %rdx, %rcx leaq _Z13sum_vectors1DPfS_S_ii(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC3(%rip), %rdx movq %rdx, %rcx leaq _Z12fill_array1DPfS_ii(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC4(%rip), %rdx movq %rdx, %rcx leaq _Z12fill_array2DPfS_ii(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3703: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" 
.align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "CUDA_Suma_Populare.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .globl _Z27__device_stub__fill_array2DPfS_ii # -- Begin function _Z27__device_stub__fill_array2DPfS_ii .p2align 4, 0x90 .type _Z27__device_stub__fill_array2DPfS_ii,@function _Z27__device_stub__fill_array2DPfS_ii: # @_Z27__device_stub__fill_array2DPfS_ii .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 8(%rsp), %rax movq %rax, 104(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z12fill_array2DPfS_ii, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z27__device_stub__fill_array2DPfS_ii, .Lfunc_end0-_Z27__device_stub__fill_array2DPfS_ii .cfi_endproc # -- End function .globl _Z27__device_stub__fill_array1DPfS_ii # -- Begin function _Z27__device_stub__fill_array1DPfS_ii .p2align 4, 0x90 .type _Z27__device_stub__fill_array1DPfS_ii,@function _Z27__device_stub__fill_array1DPfS_ii: # @_Z27__device_stub__fill_array1DPfS_ii .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 8(%rsp), %rax movq %rax, 104(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 
80(%rsp), %r9 movl $_Z12fill_array1DPfS_ii, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end1: .size _Z27__device_stub__fill_array1DPfS_ii, .Lfunc_end1-_Z27__device_stub__fill_array1DPfS_ii .cfi_endproc # -- End function .globl _Z28__device_stub__sum_vectors1DPfS_S_ii # -- Begin function _Z28__device_stub__sum_vectors1DPfS_S_ii .p2align 4, 0x90 .type _Z28__device_stub__sum_vectors1DPfS_S_ii,@function _Z28__device_stub__sum_vectors1DPfS_S_ii: # @_Z28__device_stub__sum_vectors1DPfS_S_ii .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) movl %r8d, (%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) movq %rsp, %rax movq %rax, 112(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z13sum_vectors1DPfS_S_ii, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end2: .size _Z28__device_stub__sum_vectors1DPfS_S_ii, .Lfunc_end2-_Z28__device_stub__sum_vectors1DPfS_S_ii .cfi_endproc # -- End function .globl _Z28__device_stub__sum_vectors2DPfS_S_ii # -- Begin function _Z28__device_stub__sum_vectors2DPfS_S_ii .p2align 4, 0x90 .type _Z28__device_stub__sum_vectors2DPfS_S_ii,@function _Z28__device_stub__sum_vectors2DPfS_S_ii: # @_Z28__device_stub__sum_vectors2DPfS_S_ii .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) movl %r8d, (%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax 
movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) movq %rsp, %rax movq %rax, 112(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z13sum_vectors2DPfS_S_ii, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end3: .size _Z28__device_stub__sum_vectors2DPfS_S_ii, .Lfunc_end3-_Z28__device_stub__sum_vectors2DPfS_S_ii .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $152, %rsp .cfi_def_cfa_offset 208 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movabsq $137438953504, %r12 # imm = 0x2000000020 movabsq $68719476752, %r13 # imm = 0x1000000010 movl $1048576, %edi # imm = 0x100000 callq malloc movq %rax, %rbx movl $1048576, %edi # imm = 0x100000 callq malloc movq %rax, %r14 movl $1048576, %edi # imm = 0x100000 callq malloc movq %rax, %r15 leaq 96(%rsp), %rdi movl $1048576, %esi # imm = 0x100000 callq hipMalloc leaq 88(%rsp), %rdi movl $1048576, %esi # imm = 0x100000 callq hipMalloc leaq 104(%rsp), %rdi movl $1048576, %esi # imm = 0x100000 callq hipMalloc movq %r13, %rdi movl $1, %esi movq %r12, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB4_2 # %bb.1: movq 96(%rsp), %rax movq 88(%rsp), %rcx movq %rax, 72(%rsp) movq %rcx, 64(%rsp) movl $512, 8(%rsp) # imm = 0x200 movl $512, 4(%rsp) # imm = 0x200 leaq 
72(%rsp), %rax movq %rax, 112(%rsp) leaq 64(%rsp), %rax movq %rax, 120(%rsp) leaq 8(%rsp), %rax movq %rax, 128(%rsp) leaq 4(%rsp), %rax movq %rax, 136(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 56(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 112(%rsp), %r9 movl $_Z12fill_array2DPfS_ii, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 64(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB4_2: movq %r13, %rdi movl $1, %esi movq %r12, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB4_4 # %bb.3: movq 96(%rsp), %rax movq 88(%rsp), %rcx movq 104(%rsp), %rdx movq %rax, 72(%rsp) movq %rcx, 64(%rsp) movq %rdx, 56(%rsp) movl $512, 4(%rsp) # imm = 0x200 movl $512, 84(%rsp) # imm = 0x200 leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 64(%rsp), %rax movq %rax, 120(%rsp) leaq 56(%rsp), %rax movq %rax, 128(%rsp) leaq 4(%rsp), %rax movq %rax, 136(%rsp) leaq 84(%rsp), %rax movq %rax, 144(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 112(%rsp), %r9 movl $_Z13sum_vectors2DPfS_S_ii, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB4_4: movabsq $4294967808, %r12 # imm = 0x100000200 movq %r12, %rdi movl $1, %esi movq %r12, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB4_6 # %bb.5: movq 96(%rsp), %rax movq 88(%rsp), %rcx movq %rax, 72(%rsp) movq %rcx, 64(%rsp) movl $512, 8(%rsp) # imm = 0x200 movl $512, 4(%rsp) # imm = 0x200 leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 64(%rsp), %rax movq %rax, 120(%rsp) leaq 8(%rsp), %rax movq %rax, 128(%rsp) leaq 
4(%rsp), %rax movq %rax, 136(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 56(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 112(%rsp), %r9 movl $_Z12fill_array1DPfS_ii, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 64(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB4_6: movq %r12, %rdi movl $1, %esi movq %r12, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB4_8 # %bb.7: movq 96(%rsp), %rax movq 88(%rsp), %rcx movq 104(%rsp), %rdx movq %rax, 72(%rsp) movq %rcx, 64(%rsp) movq %rdx, 56(%rsp) movl $512, 4(%rsp) # imm = 0x200 movl $512, 84(%rsp) # imm = 0x200 leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 64(%rsp), %rax movq %rax, 120(%rsp) leaq 56(%rsp), %rax movq %rax, 128(%rsp) leaq 4(%rsp), %rax movq %rax, 136(%rsp) leaq 84(%rsp), %rax movq %rax, 144(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 112(%rsp), %r9 movl $_Z13sum_vectors1DPfS_S_ii, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB4_8: movq 96(%rsp), %rsi movl $1048576, %edx # imm = 0x100000 movq %rbx, %rdi movl $2, %ecx callq hipMemcpy movq 88(%rsp), %rsi movl $1048576, %edx # imm = 0x100000 movq %r14, %rdi movl $2, %ecx callq hipMemcpy movq 104(%rsp), %rsi movl $1048576, %edx # imm = 0x100000 movq %r15, %rdi movl $2, %ecx callq hipMemcpy xorl %r13d, %r13d movq %r15, %rbp jmp .LBB4_9 .p2align 4, 0x90 .LBB4_14: # in Loop: Header=BB4_9 Depth=1 movq %r12, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r12), %rax movq %r12, %rdi movl $10, %esi callq *48(%rax) .LBB4_15: # 
%_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit # in Loop: Header=BB4_9 Depth=1 movsbl %al, %esi movl $_ZSt4cout, %edi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv incq %r13 addq $2048, %rbp # imm = 0x800 cmpq $512, %r13 # imm = 0x200 je .LBB4_16 .LBB4_9: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB4_10 Depth 2 xorl %r12d, %r12d .p2align 4, 0x90 .LBB4_10: # Parent Loop BB4_9 Depth=1 # => This Inner Loop Header: Depth=2 movss (%rbp,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $_ZSt4cout, %edi callq _ZNSo9_M_insertIdEERSoT_ movl $.L.str, %esi movl $1, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l incq %r12 cmpq $512, %r12 # imm = 0x200 jne .LBB4_10 # %bb.11: # in Loop: Header=BB4_9 Depth=1 movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq _ZSt4cout+240(%rax), %r12 testq %r12, %r12 je .LBB4_17 # %bb.12: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i # in Loop: Header=BB4_9 Depth=1 cmpb $0, 56(%r12) je .LBB4_14 # %bb.13: # in Loop: Header=BB4_9 Depth=1 movzbl 67(%r12), %eax jmp .LBB4_15 .LBB4_16: movq %rbx, %rdi callq free movq %r14, %rdi callq free movq %r15, %rdi callq free movq 96(%rsp), %rdi callq hipFree movq 88(%rsp), %rdi callq hipFree movq 104(%rsp), %rdi callq hipFree xorl %eax, %eax addq $152, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .LBB4_17: .cfi_def_cfa_offset 208 callq _ZSt16__throw_bad_castv .Lfunc_end4: .size main, .Lfunc_end4-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, 
__hip_gpubin_handle(%rip) jne .LBB5_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB5_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z12fill_array2DPfS_ii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z12fill_array1DPfS_ii, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z13sum_vectors1DPfS_S_ii, %esi movl $.L__unnamed_3, %edx movl $.L__unnamed_3, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z13sum_vectors2DPfS_S_ii, %esi movl $.L__unnamed_4, %edx movl $.L__unnamed_4, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end5: .size __hip_module_ctor, .Lfunc_end5-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB6_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB6_2: retq .Lfunc_end6: .size __hip_module_dtor, .Lfunc_end6-__hip_module_dtor .cfi_endproc # -- End function .type _Z12fill_array2DPfS_ii,@object # @_Z12fill_array2DPfS_ii .section .rodata,"a",@progbits .globl _Z12fill_array2DPfS_ii .p2align 3, 0x0 _Z12fill_array2DPfS_ii: .quad 
_Z27__device_stub__fill_array2DPfS_ii .size _Z12fill_array2DPfS_ii, 8 .type _Z12fill_array1DPfS_ii,@object # @_Z12fill_array1DPfS_ii .globl _Z12fill_array1DPfS_ii .p2align 3, 0x0 _Z12fill_array1DPfS_ii: .quad _Z27__device_stub__fill_array1DPfS_ii .size _Z12fill_array1DPfS_ii, 8 .type _Z13sum_vectors1DPfS_S_ii,@object # @_Z13sum_vectors1DPfS_S_ii .globl _Z13sum_vectors1DPfS_S_ii .p2align 3, 0x0 _Z13sum_vectors1DPfS_S_ii: .quad _Z28__device_stub__sum_vectors1DPfS_S_ii .size _Z13sum_vectors1DPfS_S_ii, 8 .type _Z13sum_vectors2DPfS_S_ii,@object # @_Z13sum_vectors2DPfS_S_ii .globl _Z13sum_vectors2DPfS_S_ii .p2align 3, 0x0 _Z13sum_vectors2DPfS_S_ii: .quad _Z28__device_stub__sum_vectors2DPfS_S_ii .size _Z13sum_vectors2DPfS_S_ii, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz " " .size .L.str, 2 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z12fill_array2DPfS_ii" .size .L__unnamed_1, 23 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_Z12fill_array1DPfS_ii" .size .L__unnamed_2, 23 .type .L__unnamed_3,@object # @2 .L__unnamed_3: .asciz "_Z13sum_vectors1DPfS_S_ii" .size .L__unnamed_3, 26 .type .L__unnamed_4,@object # @3 .L__unnamed_4: .asciz "_Z13sum_vectors2DPfS_S_ii" .size .L__unnamed_4, 26 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 
e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z27__device_stub__fill_array2DPfS_ii .addrsig_sym _Z27__device_stub__fill_array1DPfS_ii .addrsig_sym _Z28__device_stub__sum_vectors1DPfS_S_ii .addrsig_sym _Z28__device_stub__sum_vectors2DPfS_S_ii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z12fill_array2DPfS_ii .addrsig_sym _Z12fill_array1DPfS_ii .addrsig_sym _Z13sum_vectors1DPfS_S_ii .addrsig_sym _Z13sum_vectors2DPfS_S_ii .addrsig_sym _ZSt4cout .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
extern "C" __global__ void lifeStep(char** lifeData, int width, int height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int right = (x + 1) % width; int left = (x + width - 1) % width; int top = (y + height - 1) % height; int down = (y + 1) % height; // Count alive cells. int aliveCells = lifeData[left][top] + lifeData[x][top] + lifeData[right][top] + lifeData[left][y] + lifeData[right][y] + lifeData[left][down] + lifeData[x][down] + lifeData[right][down]; lifeData[x][y] = aliveCells == 3 || (aliveCells == 2 && lifeData[x][y]) ? 1 : 0; }
code for sm_80 Function : lifeStep .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ IABS R7, c[0x0][0x168] ; /* 0x00005a0000077a13 */ /* 0x000fe20000000000 */ /*0020*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e220000002500 */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*0040*/ I2F.RP R4, R7 ; /* 0x0000000700047306 */ /* 0x000e620000209400 */ /*0050*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */ /* 0x000e2e0000002100 */ /*0060*/ MUFU.RCP R4, R4 ; /* 0x0000000400047308 */ /* 0x002e620000001000 */ /*0070*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */ /* 0x001fca00078e0205 */ /*0080*/ IADD3 R5, R0, c[0x0][0x168], RZ ; /* 0x00005a0000057a10 */ /* 0x000fe40007ffe0ff */ /*0090*/ IADD3 R2, R4, 0xffffffe, RZ ; /* 0x0ffffffe04027810 */ /* 0x002fe40007ffe0ff */ /*00a0*/ IADD3 R5, R5, -0x1, RZ ; /* 0xffffffff05057810 */ /* 0x000fe40007ffe0ff */ /*00b0*/ F2I.FTZ.U32.TRUNC.NTZ R3, R2 ; /* 0x0000000200037305 */ /* 0x000062000021f000 */ /*00c0*/ IADD3 R4, R0, 0x1, RZ ; /* 0x0000000100047810 */ /* 0x000fc80007ffe0ff */ /*00d0*/ IABS R8, R4 ; /* 0x0000000400087213 */ /* 0x000fe40000000000 */ /*00e0*/ ISETP.GE.AND P3, PT, R4, RZ, PT ; /* 0x000000ff0400720c */ /* 0x000fe20003f66270 */ /*00f0*/ IMAD.MOV.U32 R2, RZ, RZ, RZ ; /* 0x000000ffff027224 */ /* 0x001fe400078e00ff */ /*0100*/ IMAD.MOV R6, RZ, RZ, -R3 ; /* 0x000000ffff067224 */ /* 0x002fc800078e0a03 */ /*0110*/ IMAD R9, R6, R7, RZ ; /* 0x0000000706097224 */ /* 0x000fe200078e02ff */ /*0120*/ IABS R6, R5 ; /* 0x0000000500067213 */ /* 0x000fc60000000000 */ /*0130*/ IMAD.HI.U32 R3, R3, R9, R2 ; /* 0x0000000903037227 */ /* 0x000fc800078e0002 */ /*0140*/ IMAD.MOV.U32 R9, RZ, RZ, 0x8 ; /* 0x00000008ff097424 */ /* 0x000fe400078e00ff */ /*0150*/ IMAD.HI.U32 R2, R3, R6, RZ ; /* 0x0000000603027227 */ /* 
0x000fc800078e00ff */ /*0160*/ IMAD.MOV R2, RZ, RZ, -R2 ; /* 0x000000ffff027224 */ /* 0x000fe400078e0a02 */ /*0170*/ IMAD.HI.U32 R3, R3, R8, RZ ; /* 0x0000000803037227 */ /* 0x000fc800078e00ff */ /*0180*/ IMAD R6, R7, R2, R6 ; /* 0x0000000207067224 */ /* 0x000fe400078e0206 */ /*0190*/ IMAD.MOV R3, RZ, RZ, -R3 ; /* 0x000000ffff037224 */ /* 0x000fc600078e0a03 */ /*01a0*/ ISETP.GT.U32.AND P1, PT, R7.reuse, R6, PT ; /* 0x000000060700720c */ /* 0x040fe20003f24070 */ /*01b0*/ IMAD R2, R7, R3, R8 ; /* 0x0000000307027224 */ /* 0x000fca00078e0208 */ /*01c0*/ ISETP.GT.U32.AND P0, PT, R7, R2, PT ; /* 0x000000020700720c */ /* 0x000fce0003f04070 */ /*01d0*/ @!P1 IMAD.IADD R6, R6, 0x1, -R7 ; /* 0x0000000106069824 */ /* 0x000fca00078e0a07 */ /*01e0*/ ISETP.GT.U32.AND P2, PT, R7, R6, PT ; /* 0x000000060700720c */ /* 0x000fe20003f44070 */ /*01f0*/ @!P0 IMAD.IADD R2, R2, 0x1, -R7 ; /* 0x0000000102028824 */ /* 0x000fe200078e0a07 */ /*0200*/ ISETP.GE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */ /* 0x000fe40003f06270 */ /*0210*/ LOP3.LUT R5, RZ, c[0x0][0x168], RZ, 0x33, !PT ; /* 0x00005a00ff057a12 */ /* 0x000fe400078e33ff */ /*0220*/ ISETP.GT.U32.AND P1, PT, R7, R2, PT ; /* 0x000000020700720c */ /* 0x000fce0003f24070 */ /*0230*/ @!P2 IMAD.IADD R6, R6, 0x1, -R7 ; /* 0x000000010606a824 */ /* 0x000fe200078e0a07 */ /*0240*/ ISETP.NE.AND P2, PT, RZ, c[0x0][0x168], PT ; /* 0x00005a00ff007a0c */ /* 0x000fc60003f45270 */ /*0250*/ @!P0 IMAD.MOV R6, RZ, RZ, -R6 ; /* 0x000000ffff068224 */ /* 0x000fe400078e0a06 */ /*0260*/ @!P1 IMAD.IADD R2, R2, 0x1, -R7 ; /* 0x0000000102029824 */ /* 0x000fc600078e0a07 */ /*0270*/ SEL R6, R5, R6, !P2 ; /* 0x0000000605067207 */ /* 0x000fe20005000000 */ /*0280*/ IMAD.MOV.U32 R4, RZ, RZ, R2 ; /* 0x000000ffff047224 */ /* 0x000fc800078e0002 */ /*0290*/ IMAD.WIDE R6, R6, R9, c[0x0][0x160] ; /* 0x0000580006067625 */ /* 0x000fc800078e0209 */ /*02a0*/ @!P3 IMAD.MOV R4, RZ, RZ, -R4 ; /* 0x000000ffff04b224 */ /* 0x000fe400078e0a04 */ /*02b0*/ LDG.E.64 R6, [R6.64] ; /* 
0x0000000406067981 */ /* 0x000ea2000c1e1b00 */ /*02c0*/ IMAD.WIDE R2, R0, R9, c[0x0][0x160] ; /* 0x0000580000027625 */ /* 0x000fe400078e0209 */ /*02d0*/ SEL R4, R5, R4, !P2 ; /* 0x0000000405047207 */ /* 0x000fc80005000000 */ /*02e0*/ LDG.E.64 R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ee2000c1e1b00 */ /*02f0*/ IMAD.WIDE R4, R4, R9, c[0x0][0x160] ; /* 0x0000580004047625 */ /* 0x000fcc00078e0209 */ /*0300*/ LDG.E.64 R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000f22000c1e1b00 */ /*0310*/ IABS R0, c[0x0][0x16c] ; /* 0x00005b0000007a13 */ /* 0x000fc60000000000 */ /*0320*/ S2R R9, SR_CTAID.Y ; /* 0x0000000000097919 */ /* 0x000e220000002600 */ /*0330*/ I2F.RP R12, R0 ; /* 0x00000000000c7306 */ /* 0x000e660000209400 */ /*0340*/ S2R R8, SR_TID.Y ; /* 0x0000000000087919 */ /* 0x000e2a0000002200 */ /*0350*/ MUFU.RCP R12, R12 ; /* 0x0000000c000c7308 */ /* 0x002e620000001000 */ /*0360*/ IMAD R9, R9, c[0x0][0x4], R8 ; /* 0x0000010009097a24 */ /* 0x001fe200078e0208 */ /*0370*/ IADD3 R10, R12, 0xffffffe, RZ ; /* 0x0ffffffe0c0a7810 */ /* 0x002fc80007ffe0ff */ /*0380*/ IADD3 R8, R9.reuse, c[0x0][0x16c], RZ ; /* 0x00005b0009087a10 */ /* 0x040fe40007ffe0ff */ /*0390*/ F2I.FTZ.U32.TRUNC.NTZ R11, R10 ; /* 0x0000000a000b7305 */ /* 0x000062000021f000 */ /*03a0*/ IADD3 R13, R9, 0x1, RZ ; /* 0x00000001090d7810 */ /* 0x000fe40007ffe0ff */ /*03b0*/ IADD3 R8, R8, -0x1, RZ ; /* 0xffffffff08087810 */ /* 0x000fe40007ffe0ff */ /*03c0*/ IABS R17, R13 ; /* 0x0000000d00117213 */ /* 0x000fe40000000000 */ /*03d0*/ IABS R12, R8 ; /* 0x00000008000c7213 */ /* 0x000fe20000000000 */ /*03e0*/ IMAD.MOV.U32 R10, RZ, RZ, RZ ; /* 0x000000ffff0a7224 */ /* 0x001fc400078e00ff */ /*03f0*/ IMAD.MOV R15, RZ, RZ, -R11 ; /* 0x000000ffff0f7224 */ /* 0x002fc800078e0a0b */ /*0400*/ IMAD R15, R15, R0, RZ ; /* 0x000000000f0f7224 */ /* 0x000fc800078e02ff */ /*0410*/ IMAD.HI.U32 R10, R11, R15, R10 ; /* 0x0000000f0b0a7227 */ /* 0x000fc800078e000a */ /*0420*/ IMAD.MOV.U32 R15, RZ, RZ, R12 ; /* 0x000000ffff0f7224 */ /* 
0x000fc800078e000c */ /*0430*/ IMAD.HI.U32 R11, R10, R15, RZ ; /* 0x0000000f0a0b7227 */ /* 0x000fc800078e00ff */ /*0440*/ IMAD.HI.U32 R10, R10, R17, RZ ; /* 0x000000110a0a7227 */ /* 0x000fc800078e00ff */ /*0450*/ IMAD.MOV R11, RZ, RZ, -R11 ; /* 0x000000ffff0b7224 */ /* 0x000fe400078e0a0b */ /*0460*/ IMAD.MOV R10, RZ, RZ, -R10 ; /* 0x000000ffff0a7224 */ /* 0x000fe400078e0a0a */ /*0470*/ IMAD R11, R0.reuse, R11, R15 ; /* 0x0000000b000b7224 */ /* 0x040fe400078e020f */ /*0480*/ IMAD R15, R0, R10, R17 ; /* 0x0000000a000f7224 */ /* 0x000fc600078e0211 */ /*0490*/ ISETP.GT.U32.AND P0, PT, R0.reuse, R11, PT ; /* 0x0000000b0000720c */ /* 0x040fe40003f04070 */ /*04a0*/ ISETP.GT.U32.AND P1, PT, R0, R15, PT ; /* 0x0000000f0000720c */ /* 0x000fd60003f24070 */ /*04b0*/ @!P0 IMAD.IADD R11, R11, 0x1, -R0.reuse ; /* 0x000000010b0b8824 */ /* 0x100fe200078e0a00 */ /*04c0*/ ISETP.GE.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */ /* 0x000fe20003f06270 */ /*04d0*/ @!P1 IMAD.IADD R15, R15, 0x1, -R0 ; /* 0x000000010f0f9824 */ /* 0x000fe200078e0a00 */ /*04e0*/ ISETP.GE.AND P1, PT, R13, RZ, PT ; /* 0x000000ff0d00720c */ /* 0x000fe40003f26270 */ /*04f0*/ ISETP.GT.U32.AND P2, PT, R0.reuse, R11, PT ; /* 0x0000000b0000720c */ /* 0x040fe40003f44070 */ /*0500*/ ISETP.GT.U32.AND P3, PT, R0, R15, PT ; /* 0x0000000f0000720c */ /* 0x000fd60003f64070 */ /*0510*/ @!P2 IMAD.IADD R11, R11, 0x1, -R0.reuse ; /* 0x000000010b0ba824 */ /* 0x100fe200078e0a00 */ /*0520*/ ISETP.NE.AND P2, PT, RZ, c[0x0][0x16c], PT ; /* 0x00005b00ff007a0c */ /* 0x000fe20003f45270 */ /*0530*/ @!P3 IMAD.IADD R15, R15, 0x1, -R0 ; /* 0x000000010f0fb824 */ /* 0x000fe200078e0a00 */ /*0540*/ LOP3.LUT R0, RZ, c[0x0][0x16c], RZ, 0x33, !PT ; /* 0x00005b00ff007a12 */ /* 0x000fe200078e33ff */ /*0550*/ @!P0 IMAD.MOV R11, RZ, RZ, -R11 ; /* 0x000000ffff0b8224 */ /* 0x000fe400078e0a0b */ /*0560*/ @!P1 IMAD.MOV R15, RZ, RZ, -R15 ; /* 0x000000ffff0f9224 */ /* 0x000fc600078e0a0f */ /*0570*/ SEL R17, R0.reuse, R11, !P2 ; /* 0x0000000b00117207 */ 
/* 0x040fe40005000000 */ /*0580*/ SHF.R.S32.HI R11, RZ, 0x1f, R9 ; /* 0x0000001fff0b7819 */ /* 0x000fe40000011409 */ /*0590*/ SHF.R.S32.HI R13, RZ, 0x1f, R17 ; /* 0x0000001fff0d7819 */ /* 0x000fe40000011411 */ /*05a0*/ SEL R23, R0, R15, !P2 ; /* 0x0000000f00177207 */ /* 0x000fc80005000000 */ /*05b0*/ SHF.R.S32.HI R19, RZ, 0x1f, R23 ; /* 0x0000001fff137819 */ /* 0x000fe40000011417 */ /*05c0*/ IADD3 R26, P0, R17, R6.reuse, RZ ; /* 0x00000006111a7210 */ /* 0x084fe40007f1e0ff */ /*05d0*/ IADD3 R24, P1, R9, R6.reuse, RZ ; /* 0x0000000609187210 */ /* 0x080fe40007f3e0ff */ /*05e0*/ IADD3 R6, P2, R23, R6, RZ ; /* 0x0000000617067210 */ /* 0x000fe20007f5e0ff */ /*05f0*/ IMAD.X R27, R7, 0x1, R13, P0 ; /* 0x00000001071b7824 */ /* 0x000fe200000e060d */ /*0600*/ IADD3 R20, P0, R17, R2, RZ ; /* 0x0000000211147210 */ /* 0x008fe20007f1e0ff */ /*0610*/ IMAD.X R25, R7, 0x1, R11, P1 ; /* 0x0000000107197824 */ /* 0x000fc600008e060b */ /*0620*/ LDG.E.S8 R0, [R26.64] ; /* 0x000000041a007981 */ /* 0x000ea2000c1e1300 */ /*0630*/ IMAD.X R7, R7, 0x1, R19, P2 ; /* 0x0000000107077824 */ /* 0x000fe400010e0613 */ /*0640*/ IMAD.X R21, R3, 0x1, R13, P0 ; /* 0x0000000103157824 */ /* 0x000fe200000e060d */ /*0650*/ IADD3 R14, P0, R17, R4.reuse, RZ ; /* 0x00000004110e7210 */ /* 0x090fe20007f1e0ff */ /*0660*/ LDG.E.S8 R8, [R24.64] ; /* 0x0000000418087981 */ /* 0x000ee2000c1e1300 */ /*0670*/ IADD3 R12, P1, R9, R4, RZ ; /* 0x00000004090c7210 */ /* 0x000fc60007f3e0ff */ /*0680*/ IMAD.X R15, R5, 0x1, R13, P0 ; /* 0x00000001050f7824 */ /* 0x000fe200000e060d */ /*0690*/ IADD3 R16, P0, R23, R2, RZ ; /* 0x0000000217107210 */ /* 0x000fe20007f1e0ff */ /*06a0*/ IMAD.X R13, R5, 0x1, R11, P1 ; /* 0x00000001050d7824 */ /* 0x000fe200008e060b */ /*06b0*/ LDG.E.S8 R21, [R20.64] ; /* 0x0000000414157981 */ /* 0x000ea6000c1e1300 */ /*06c0*/ IMAD.X R17, R3, 0x1, R19, P0 ; /* 0x0000000103117824 */ /* 0x000fe200000e0613 */ /*06d0*/ LDG.E.S8 R14, [R14.64] ; /* 0x000000040e0e7981 */ /* 0x000ea2000c1e1300 */ /*06e0*/ IADD3 R4, 
P0, R23, R4, RZ ; /* 0x0000000417047210 */ /* 0x000fc60007f1e0ff */ /*06f0*/ LDG.E.S8 R12, [R12.64] ; /* 0x000000040c0c7981 */ /* 0x000ee4000c1e1300 */ /*0700*/ IMAD.X R5, R5, 0x1, R19, P0 ; /* 0x0000000105057824 */ /* 0x000fe400000e0613 */ /*0710*/ LDG.E.S8 R7, [R6.64] ; /* 0x0000000406077981 */ /* 0x000f28000c1e1300 */ /*0720*/ LDG.E.S8 R16, [R16.64] ; /* 0x0000000410107981 */ /* 0x000f28000c1e1300 */ /*0730*/ LDG.E.S8 R5, [R4.64] ; /* 0x0000000404057981 */ /* 0x000f62000c1e1300 */ /*0740*/ IADD3 R2, P1, R9, R2, RZ ; /* 0x0000000209027210 */ /* 0x000fe20007f3e0ff */ /*0750*/ BSSY B0, 0x860 ; /* 0x0000010000007945 */ /* 0x000fe80003800000 */ /*0760*/ IMAD.X R3, R3, 0x1, R11, P1 ; /* 0x0000000103037824 */ /* 0x000fe200008e060b */ /*0770*/ IADD3 R21, R14, R21, R0 ; /* 0x000000150e157210 */ /* 0x004fc80007ffe000 */ /*0780*/ IADD3 R8, R12, R21, R8 ; /* 0x000000150c087210 */ /* 0x008fc80007ffe008 */ /*0790*/ IADD3 R8, R16, R8, R7 ; /* 0x0000000810087210 */ /* 0x010fca0007ffe007 */ /*07a0*/ IMAD.IADD R8, R8, 0x1, R5 ; /* 0x0000000108087824 */ /* 0x020fca00078e0205 */ /*07b0*/ ISETP.NE.AND P0, PT, R8, 0x3, PT ; /* 0x000000030800780c */ /* 0x000fe20003f05270 */ /*07c0*/ IMAD.MOV.U32 R0, RZ, RZ, 0x1 ; /* 0x00000001ff007424 */ /* 0x000fd800078e00ff */ /*07d0*/ @!P0 BRA 0x850 ; /* 0x0000007000008947 */ /* 0x000fea0003800000 */ /*07e0*/ ISETP.NE.AND P0, PT, R8, 0x2, PT ; /* 0x000000020800780c */ /* 0x000fda0003f05270 */ /*07f0*/ @P0 BRA 0x840 ; /* 0x0000004000000947 */ /* 0x000fea0003800000 */ /*0800*/ LDG.E.U8 R0, [R2.64] ; /* 0x0000000402007981 */ /* 0x000ea4000c1e1100 */ /*0810*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */ /* 0x004fc80003f05270 */ /*0820*/ SEL R0, RZ, 0x1, !P0 ; /* 0x00000001ff007807 */ /* 0x000fe20004000000 */ /*0830*/ BRA 0x850 ; /* 0x0000001000007947 */ /* 0x000fea0003800000 */ /*0840*/ PRMT R0, RZ, 0x7610, R0 ; /* 0x00007610ff007816 */ /* 0x000fe40000000000 */ /*0850*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0860*/ 
STG.E.U8 [R2.64], R0 ; /* 0x0000000002007986 */ /* 0x000fe2000c101104 */ /*0870*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0880*/ BRA 0x880; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0890*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0900*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0910*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0920*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0930*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0940*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0950*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0960*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0970*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
extern "C" __global__ void lifeStep(char** lifeData, int width, int height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int right = (x + 1) % width; int left = (x + width - 1) % width; int top = (y + height - 1) % height; int down = (y + 1) % height; // Count alive cells. int aliveCells = lifeData[left][top] + lifeData[x][top] + lifeData[right][top] + lifeData[left][y] + lifeData[right][y] + lifeData[left][down] + lifeData[x][down] + lifeData[right][down]; lifeData[x][y] = aliveCells == 3 || (aliveCells == 2 && lifeData[x][y]) ? 1 : 0; }
.file "tmpxft_000bd2df_00000000-6_LifeStep.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z30__device_stub__Z8lifeStepPPciiPPcii .type _Z30__device_stub__Z8lifeStepPPciiPPcii, @function _Z30__device_stub__Z8lifeStepPPciiPPcii: .LFB2051: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movl %esi, 4(%rsp) movl %edx, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) leaq 4(%rsp), %rax movq %rax, 88(%rsp) movq %rsp, %rax movq %rax, 96(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 104(%rsp), %rax subq %fs:40, %rax jne .L8 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq lifeStep(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z30__device_stub__Z8lifeStepPPciiPPcii, .-_Z30__device_stub__Z8lifeStepPPciiPPcii .globl lifeStep .type lifeStep, @function lifeStep: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z30__device_stub__Z8lifeStepPPciiPPcii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size lifeStep, .-lifeStep .section .rodata.str1.1,"aMS",@progbits,1 .LC0: 
.string "lifeStep" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq lifeStep(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
extern "C" __global__ void lifeStep(char** lifeData, int width, int height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int right = (x + 1) % width; int left = (x + width - 1) % width; int top = (y + height - 1) % height; int down = (y + 1) % height; // Count alive cells. int aliveCells = lifeData[left][top] + lifeData[x][top] + lifeData[right][top] + lifeData[left][y] + lifeData[right][y] + lifeData[left][down] + lifeData[x][down] + lifeData[right][down]; lifeData[x][y] = aliveCells == 3 || (aliveCells == 2 && lifeData[x][y]) ? 1 : 0; }
#include <hip/hip_runtime.h> extern "C" __global__ void lifeStep(char** lifeData, int width, int height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int right = (x + 1) % width; int left = (x + width - 1) % width; int top = (y + height - 1) % height; int down = (y + 1) % height; // Count alive cells. int aliveCells = lifeData[left][top] + lifeData[x][top] + lifeData[right][top] + lifeData[left][y] + lifeData[right][y] + lifeData[left][down] + lifeData[x][down] + lifeData[right][down]; lifeData[x][y] = aliveCells == 3 || (aliveCells == 2 && lifeData[x][y]) ? 1 : 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> extern "C" __global__ void lifeStep(char** lifeData, int width, int height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int right = (x + 1) % width; int left = (x + width - 1) % width; int top = (y + height - 1) % height; int down = (y + 1) % height; // Count alive cells. int aliveCells = lifeData[left][top] + lifeData[x][top] + lifeData[right][top] + lifeData[left][y] + lifeData[right][y] + lifeData[left][down] + lifeData[x][down] + lifeData[right][down]; lifeData[x][y] = aliveCells == 3 || (aliveCells == 2 && lifeData[x][y]) ? 1 : 0; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected lifeStep .globl lifeStep .p2align 8 .type lifeStep,@function lifeStep: s_clause 0x1 s_load_b64 s[6:7], s[0:1], 0x8 s_load_b32 s2, s[0:1], 0x1c v_bfe_u32 v4, v0, 10, 10 s_load_b64 s[0:1], s[0:1], 0x0 s_waitcnt lgkmcnt(0) s_ashr_i32 s3, s6, 31 s_ashr_i32 s5, s7, 31 s_add_i32 s4, s6, s3 s_delay_alu instid0(SALU_CYCLE_1) s_xor_b32 s4, s4, s3 s_add_i32 s3, s7, s5 v_cvt_f32_u32_e32 v1, s4 s_xor_b32 s3, s3, s5 s_and_b32 s5, s2, 0xffff v_cvt_f32_u32_e32 v2, s3 s_lshr_b32 s2, s2, 16 v_rcp_iflag_f32_e32 v5, v1 v_and_b32_e32 v1, 0x3ff, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_rcp_iflag_f32_e32 v6, v2 v_mad_u64_u32 v[2:3], null, s14, s5, v[1:2] s_sub_i32 s5, 0, s3 s_waitcnt_depctr 0xfff v_mul_f32_e32 v3, 0x4f7ffffe, v5 v_mad_u64_u32 v[0:1], null, s15, s2, v[4:5] v_mul_f32_e32 v1, 0x4f7ffffe, v6 v_add_nc_u32_e32 v4, 1, v2 s_delay_alu instid0(VALU_DEP_4) v_cvt_u32_f32_e32 v3, v3 s_sub_i32 s2, 0, s4 v_add3_u32 v6, s6, -1, v2 v_cvt_u32_f32_e32 v5, v1 v_add3_u32 v8, s7, -1, v0 v_mul_lo_u32 v7, s2, v3 v_add_nc_u32_e32 v9, 1, v0 v_ashrrev_i32_e32 v14, 31, v6 v_mul_lo_u32 v12, s5, v5 v_ashrrev_i32_e32 v1, 31, v4 v_ashrrev_i32_e32 v10, 31, v8 v_ashrrev_i32_e32 v11, 31, v9 v_add_nc_u32_e32 v6, v6, v14 v_mul_hi_u32 v7, v3, v7 v_add_nc_u32_e32 v4, v4, v1 v_add_nc_u32_e32 v8, v8, v10 v_mul_hi_u32 v17, v5, v12 v_add_nc_u32_e32 v9, v9, v11 v_xor_b32_e32 v16, v6, v14 v_xor_b32_e32 v15, v4, v1 v_xor_b32_e32 v12, v8, v10 v_add_nc_u32_e32 v3, v3, v7 v_xor_b32_e32 v13, v9, v11 s_mov_b32 s2, 0 v_add_nc_u32_e32 v17, v5, v17 s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_mad_u64_u32 v[8:9], null, v16, v3, 0 v_mad_u64_u32 v[6:7], null, v15, v3, 0 v_mad_u64_u32 v[3:4], null, v12, v17, 0 v_mad_u64_u32 v[5:6], null, v13, v17, 0 s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) v_mul_lo_u32 v3, v9, s4 v_mul_lo_u32 v5, v7, s4 s_delay_alu instid0(VALU_DEP_2) | 
instskip(NEXT) | instid1(VALU_DEP_2) v_sub_nc_u32_e32 v3, v16, v3 v_sub_nc_u32_e32 v5, v15, v5 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_subrev_nc_u32_e32 v7, s4, v3 v_cmp_le_u32_e32 vcc_lo, s4, v3 v_subrev_nc_u32_e32 v8, s4, v5 s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_cndmask_b32_e32 v3, v3, v7, vcc_lo v_cmp_le_u32_e32 vcc_lo, s4, v5 v_subrev_nc_u32_e32 v7, s4, v3 s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_cndmask_b32_e32 v5, v5, v8, vcc_lo v_cmp_le_u32_e32 vcc_lo, s4, v3 v_subrev_nc_u32_e32 v8, s4, v5 s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_cndmask_b32_e32 v3, v3, v7, vcc_lo v_cmp_le_u32_e32 vcc_lo, s4, v5 v_xor_b32_e32 v3, v3, v14 s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2) v_cndmask_b32_e32 v5, v5, v8, vcc_lo v_sub_nc_u32_e32 v7, v3, v14 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_xor_b32_e32 v5, v5, v1 v_ashrrev_i32_e32 v3, 31, v2 v_ashrrev_i32_e32 v8, 31, v7 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_sub_nc_u32_e32 v14, v5, v1 v_lshlrev_b64 v[1:2], 3, v[2:3] v_mul_lo_u32 v3, v4, s3 v_mul_lo_u32 v4, v6, s3 v_lshlrev_b64 v[7:8], 3, v[7:8] v_ashrrev_i32_e32 v15, 31, v14 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) v_lshlrev_b64 v[14:15], 3, v[14:15] v_add_co_u32 v7, vcc_lo, s0, v7 s_delay_alu instid0(VALU_DEP_4) v_add_co_ci_u32_e32 v8, vcc_lo, s1, v8, vcc_lo v_add_co_u32 v1, vcc_lo, s0, v1 v_add_co_ci_u32_e32 v2, vcc_lo, s1, v2, vcc_lo v_add_co_u32 v14, vcc_lo, s0, v14 global_load_b64 v[7:8], v[7:8], off v_add_co_ci_u32_e32 v15, vcc_lo, s1, v15, vcc_lo s_clause 0x1 global_load_b64 v[1:2], v[1:2], off global_load_b64 v[14:15], v[14:15], off v_sub_nc_u32_e32 v3, v12, v3 v_sub_nc_u32_e32 v4, v13, v4 s_mov_b32 s1, exec_lo s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_subrev_nc_u32_e32 v5, s3, 
v3 v_cmp_le_u32_e32 vcc_lo, s3, v3 v_subrev_nc_u32_e32 v6, s3, v4 s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_cndmask_b32_e32 v3, v3, v5, vcc_lo v_cmp_le_u32_e32 vcc_lo, s3, v4 v_subrev_nc_u32_e32 v5, s3, v3 s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_cndmask_b32_e32 v4, v4, v6, vcc_lo v_cmp_le_u32_e32 vcc_lo, s3, v3 v_subrev_nc_u32_e32 v6, s3, v4 s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_cndmask_b32_e32 v3, v3, v5, vcc_lo v_cmp_le_u32_e32 vcc_lo, s3, v4 v_xor_b32_e32 v3, v3, v10 s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2) v_cndmask_b32_e32 v4, v4, v6, vcc_lo v_sub_nc_u32_e32 v6, v3, v10 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_xor_b32_e32 v4, v4, v11 v_ashrrev_i32_e32 v3, 31, v0 v_ashrrev_i32_e32 v12, 31, v6 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) v_sub_nc_u32_e32 v13, v4, v11 v_ashrrev_i32_e32 v20, 31, v13 s_waitcnt vmcnt(2) v_add_co_u32 v4, vcc_lo, v7, v6 v_add_co_ci_u32_e32 v5, vcc_lo, v8, v12, vcc_lo s_waitcnt vmcnt(1) v_add_co_u32 v9, vcc_lo, v1, v6 v_add_co_ci_u32_e32 v10, vcc_lo, v2, v12, vcc_lo s_waitcnt vmcnt(0) v_add_co_u32 v11, vcc_lo, v14, v6 v_add_co_ci_u32_e32 v12, vcc_lo, v15, v12, vcc_lo v_add_co_u32 v16, vcc_lo, v7, v0 v_add_co_ci_u32_e32 v17, vcc_lo, v8, v3, vcc_lo v_add_co_u32 v18, vcc_lo, v14, v0 v_add_co_ci_u32_e32 v19, vcc_lo, v15, v3, vcc_lo v_add_co_u32 v6, vcc_lo, v7, v13 v_add_co_ci_u32_e32 v7, vcc_lo, v8, v20, vcc_lo flat_load_i8 v8, v[4:5] flat_load_i8 v9, v[9:10] flat_load_i8 v10, v[11:12] flat_load_i8 v11, v[16:17] flat_load_i8 v12, v[18:19] flat_load_i8 v16, v[6:7] v_add_co_u32 v4, vcc_lo, v1, v13 v_add_co_ci_u32_e32 v5, vcc_lo, v2, v20, vcc_lo v_add_co_u32 v6, vcc_lo, v14, v13 v_add_co_ci_u32_e32 v7, vcc_lo, v15, v20, vcc_lo flat_load_i8 v4, v[4:5] flat_load_i8 v5, v[6:7] s_waitcnt vmcnt(6) lgkmcnt(6) v_add_nc_u32_e32 v6, v9, v8 s_waitcnt vmcnt(4) 
lgkmcnt(4) s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_add3_u32 v6, v6, v10, v11 s_waitcnt vmcnt(2) lgkmcnt(2) v_add3_u32 v6, v6, v12, v16 s_waitcnt vmcnt(0) lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add3_u32 v4, v6, v4, v5 v_cmpx_lt_i32_e32 2, v4 s_xor_b32 s1, exec_lo, s1 v_cmp_ne_u32_e32 vcc_lo, 3, v4 s_mov_b32 s0, -1 s_and_b32 s2, vcc_lo, exec_lo s_or_saveexec_b32 s3, s1 s_mov_b32 s1, 0 s_xor_b32 exec_lo, exec_lo, s3 v_cmp_ne_u32_e32 vcc_lo, 2, v4 s_and_not1_b32 s2, s2, exec_lo s_mov_b32 s1, exec_lo s_and_b32 s4, vcc_lo, exec_lo s_delay_alu instid0(SALU_CYCLE_1) s_or_b32 s2, s2, s4 s_or_b32 exec_lo, exec_lo, s3 s_and_saveexec_b32 s3, s2 s_delay_alu instid0(SALU_CYCLE_1) s_xor_b32 s2, exec_lo, s3 s_and_not1_b32 s0, s0, exec_lo s_and_not1_b32 s1, s1, exec_lo s_or_b32 exec_lo, exec_lo, s2 s_and_saveexec_b32 s2, s1 s_cbranch_execz .LBB0_8 v_add_co_u32 v4, vcc_lo, v1, v0 v_add_co_ci_u32_e32 v5, vcc_lo, v2, v3, vcc_lo s_and_not1_b32 s0, s0, exec_lo flat_load_u8 v4, v[4:5] s_waitcnt vmcnt(0) lgkmcnt(0) v_cmp_ne_u16_e32 vcc_lo, 0, v4 s_and_b32 s1, vcc_lo, exec_lo s_delay_alu instid0(SALU_CYCLE_1) s_or_b32 s0, s0, s1 .LBB0_8: s_or_b32 exec_lo, exec_lo, s2 v_add_co_u32 v0, vcc_lo, v1, v0 v_cndmask_b32_e64 v4, 0, 1, s0 v_add_co_ci_u32_e32 v1, vcc_lo, v2, v3, vcc_lo flat_store_b8 v[0:1], v4 s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel lifeStep .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 
.amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 21 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size lifeStep, .Lfunc_end0-lifeStep .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .offset: 12 .size: 4 .value_kind: by_value - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - .offset: 72 .size: 8 
.value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: lifeStep .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: lifeStep.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 21 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> extern "C" __global__ void lifeStep(char** lifeData, int width, int height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int right = (x + 1) % width; int left = (x + width - 1) % width; int top = (y + height - 1) % height; int down = (y + 1) % height; // Count alive cells. int aliveCells = lifeData[left][top] + lifeData[x][top] + lifeData[right][top] + lifeData[left][y] + lifeData[right][y] + lifeData[left][down] + lifeData[x][down] + lifeData[right][down]; lifeData[x][y] = aliveCells == 3 || (aliveCells == 2 && lifeData[x][y]) ? 1 : 0; }
.text .file "LifeStep.hip" .globl __device_stub__lifeStep # -- Begin function __device_stub__lifeStep .p2align 4, 0x90 .type __device_stub__lifeStep,@function __device_stub__lifeStep: # @__device_stub__lifeStep .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movl %esi, 4(%rsp) movl %edx, (%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 4(%rsp), %rax movq %rax, 72(%rsp) movq %rsp, %rax movq %rax, 80(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 64(%rsp), %r9 movl $lifeStep, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end0: .size __device_stub__lifeStep, .Lfunc_end0-__device_stub__lifeStep .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $lifeStep, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq 
__hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type lifeStep,@object # @lifeStep .section .rodata,"a",@progbits .globl lifeStep .p2align 3, 0x0 lifeStep: .quad __device_stub__lifeStep .size lifeStep, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "lifeStep" .size .L__unnamed_1, 9 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __device_stub__lifeStep .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym lifeStep .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : lifeStep .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ IABS R7, c[0x0][0x168] ; /* 0x00005a0000077a13 */ /* 0x000fe20000000000 */ /*0020*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e220000002500 */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*0040*/ I2F.RP R4, R7 ; /* 0x0000000700047306 */ /* 0x000e620000209400 */ /*0050*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */ /* 0x000e2e0000002100 */ /*0060*/ MUFU.RCP R4, R4 ; /* 0x0000000400047308 */ /* 0x002e620000001000 */ /*0070*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */ /* 0x001fca00078e0205 */ /*0080*/ IADD3 R5, R0, c[0x0][0x168], RZ ; /* 0x00005a0000057a10 */ /* 0x000fe40007ffe0ff */ /*0090*/ IADD3 R2, R4, 0xffffffe, RZ ; /* 0x0ffffffe04027810 */ /* 0x002fe40007ffe0ff */ /*00a0*/ IADD3 R5, R5, -0x1, RZ ; /* 0xffffffff05057810 */ /* 0x000fe40007ffe0ff */ /*00b0*/ F2I.FTZ.U32.TRUNC.NTZ R3, R2 ; /* 0x0000000200037305 */ /* 0x000062000021f000 */ /*00c0*/ IADD3 R4, R0, 0x1, RZ ; /* 0x0000000100047810 */ /* 0x000fc80007ffe0ff */ /*00d0*/ IABS R8, R4 ; /* 0x0000000400087213 */ /* 0x000fe40000000000 */ /*00e0*/ ISETP.GE.AND P3, PT, R4, RZ, PT ; /* 0x000000ff0400720c */ /* 0x000fe20003f66270 */ /*00f0*/ IMAD.MOV.U32 R2, RZ, RZ, RZ ; /* 0x000000ffff027224 */ /* 0x001fe400078e00ff */ /*0100*/ IMAD.MOV R6, RZ, RZ, -R3 ; /* 0x000000ffff067224 */ /* 0x002fc800078e0a03 */ /*0110*/ IMAD R9, R6, R7, RZ ; /* 0x0000000706097224 */ /* 0x000fe200078e02ff */ /*0120*/ IABS R6, R5 ; /* 0x0000000500067213 */ /* 0x000fc60000000000 */ /*0130*/ IMAD.HI.U32 R3, R3, R9, R2 ; /* 0x0000000903037227 */ /* 0x000fc800078e0002 */ /*0140*/ IMAD.MOV.U32 R9, RZ, RZ, 0x8 ; /* 0x00000008ff097424 */ /* 0x000fe400078e00ff */ /*0150*/ IMAD.HI.U32 R2, R3, R6, RZ ; /* 0x0000000603027227 */ /* 
0x000fc800078e00ff */ /*0160*/ IMAD.MOV R2, RZ, RZ, -R2 ; /* 0x000000ffff027224 */ /* 0x000fe400078e0a02 */ /*0170*/ IMAD.HI.U32 R3, R3, R8, RZ ; /* 0x0000000803037227 */ /* 0x000fc800078e00ff */ /*0180*/ IMAD R6, R7, R2, R6 ; /* 0x0000000207067224 */ /* 0x000fe400078e0206 */ /*0190*/ IMAD.MOV R3, RZ, RZ, -R3 ; /* 0x000000ffff037224 */ /* 0x000fc600078e0a03 */ /*01a0*/ ISETP.GT.U32.AND P1, PT, R7.reuse, R6, PT ; /* 0x000000060700720c */ /* 0x040fe20003f24070 */ /*01b0*/ IMAD R2, R7, R3, R8 ; /* 0x0000000307027224 */ /* 0x000fca00078e0208 */ /*01c0*/ ISETP.GT.U32.AND P0, PT, R7, R2, PT ; /* 0x000000020700720c */ /* 0x000fce0003f04070 */ /*01d0*/ @!P1 IMAD.IADD R6, R6, 0x1, -R7 ; /* 0x0000000106069824 */ /* 0x000fca00078e0a07 */ /*01e0*/ ISETP.GT.U32.AND P2, PT, R7, R6, PT ; /* 0x000000060700720c */ /* 0x000fe20003f44070 */ /*01f0*/ @!P0 IMAD.IADD R2, R2, 0x1, -R7 ; /* 0x0000000102028824 */ /* 0x000fe200078e0a07 */ /*0200*/ ISETP.GE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */ /* 0x000fe40003f06270 */ /*0210*/ LOP3.LUT R5, RZ, c[0x0][0x168], RZ, 0x33, !PT ; /* 0x00005a00ff057a12 */ /* 0x000fe400078e33ff */ /*0220*/ ISETP.GT.U32.AND P1, PT, R7, R2, PT ; /* 0x000000020700720c */ /* 0x000fce0003f24070 */ /*0230*/ @!P2 IMAD.IADD R6, R6, 0x1, -R7 ; /* 0x000000010606a824 */ /* 0x000fe200078e0a07 */ /*0240*/ ISETP.NE.AND P2, PT, RZ, c[0x0][0x168], PT ; /* 0x00005a00ff007a0c */ /* 0x000fc60003f45270 */ /*0250*/ @!P0 IMAD.MOV R6, RZ, RZ, -R6 ; /* 0x000000ffff068224 */ /* 0x000fe400078e0a06 */ /*0260*/ @!P1 IMAD.IADD R2, R2, 0x1, -R7 ; /* 0x0000000102029824 */ /* 0x000fc600078e0a07 */ /*0270*/ SEL R6, R5, R6, !P2 ; /* 0x0000000605067207 */ /* 0x000fe20005000000 */ /*0280*/ IMAD.MOV.U32 R4, RZ, RZ, R2 ; /* 0x000000ffff047224 */ /* 0x000fc800078e0002 */ /*0290*/ IMAD.WIDE R6, R6, R9, c[0x0][0x160] ; /* 0x0000580006067625 */ /* 0x000fc800078e0209 */ /*02a0*/ @!P3 IMAD.MOV R4, RZ, RZ, -R4 ; /* 0x000000ffff04b224 */ /* 0x000fe400078e0a04 */ /*02b0*/ LDG.E.64 R6, [R6.64] ; /* 
0x0000000406067981 */ /* 0x000ea2000c1e1b00 */ /*02c0*/ IMAD.WIDE R2, R0, R9, c[0x0][0x160] ; /* 0x0000580000027625 */ /* 0x000fe400078e0209 */ /*02d0*/ SEL R4, R5, R4, !P2 ; /* 0x0000000405047207 */ /* 0x000fc80005000000 */ /*02e0*/ LDG.E.64 R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ee2000c1e1b00 */ /*02f0*/ IMAD.WIDE R4, R4, R9, c[0x0][0x160] ; /* 0x0000580004047625 */ /* 0x000fcc00078e0209 */ /*0300*/ LDG.E.64 R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000f22000c1e1b00 */ /*0310*/ IABS R0, c[0x0][0x16c] ; /* 0x00005b0000007a13 */ /* 0x000fc60000000000 */ /*0320*/ S2R R9, SR_CTAID.Y ; /* 0x0000000000097919 */ /* 0x000e220000002600 */ /*0330*/ I2F.RP R12, R0 ; /* 0x00000000000c7306 */ /* 0x000e660000209400 */ /*0340*/ S2R R8, SR_TID.Y ; /* 0x0000000000087919 */ /* 0x000e2a0000002200 */ /*0350*/ MUFU.RCP R12, R12 ; /* 0x0000000c000c7308 */ /* 0x002e620000001000 */ /*0360*/ IMAD R9, R9, c[0x0][0x4], R8 ; /* 0x0000010009097a24 */ /* 0x001fe200078e0208 */ /*0370*/ IADD3 R10, R12, 0xffffffe, RZ ; /* 0x0ffffffe0c0a7810 */ /* 0x002fc80007ffe0ff */ /*0380*/ IADD3 R8, R9.reuse, c[0x0][0x16c], RZ ; /* 0x00005b0009087a10 */ /* 0x040fe40007ffe0ff */ /*0390*/ F2I.FTZ.U32.TRUNC.NTZ R11, R10 ; /* 0x0000000a000b7305 */ /* 0x000062000021f000 */ /*03a0*/ IADD3 R13, R9, 0x1, RZ ; /* 0x00000001090d7810 */ /* 0x000fe40007ffe0ff */ /*03b0*/ IADD3 R8, R8, -0x1, RZ ; /* 0xffffffff08087810 */ /* 0x000fe40007ffe0ff */ /*03c0*/ IABS R17, R13 ; /* 0x0000000d00117213 */ /* 0x000fe40000000000 */ /*03d0*/ IABS R12, R8 ; /* 0x00000008000c7213 */ /* 0x000fe20000000000 */ /*03e0*/ IMAD.MOV.U32 R10, RZ, RZ, RZ ; /* 0x000000ffff0a7224 */ /* 0x001fc400078e00ff */ /*03f0*/ IMAD.MOV R15, RZ, RZ, -R11 ; /* 0x000000ffff0f7224 */ /* 0x002fc800078e0a0b */ /*0400*/ IMAD R15, R15, R0, RZ ; /* 0x000000000f0f7224 */ /* 0x000fc800078e02ff */ /*0410*/ IMAD.HI.U32 R10, R11, R15, R10 ; /* 0x0000000f0b0a7227 */ /* 0x000fc800078e000a */ /*0420*/ IMAD.MOV.U32 R15, RZ, RZ, R12 ; /* 0x000000ffff0f7224 */ /* 
0x000fc800078e000c */ /*0430*/ IMAD.HI.U32 R11, R10, R15, RZ ; /* 0x0000000f0a0b7227 */ /* 0x000fc800078e00ff */ /*0440*/ IMAD.HI.U32 R10, R10, R17, RZ ; /* 0x000000110a0a7227 */ /* 0x000fc800078e00ff */ /*0450*/ IMAD.MOV R11, RZ, RZ, -R11 ; /* 0x000000ffff0b7224 */ /* 0x000fe400078e0a0b */ /*0460*/ IMAD.MOV R10, RZ, RZ, -R10 ; /* 0x000000ffff0a7224 */ /* 0x000fe400078e0a0a */ /*0470*/ IMAD R11, R0.reuse, R11, R15 ; /* 0x0000000b000b7224 */ /* 0x040fe400078e020f */ /*0480*/ IMAD R15, R0, R10, R17 ; /* 0x0000000a000f7224 */ /* 0x000fc600078e0211 */ /*0490*/ ISETP.GT.U32.AND P0, PT, R0.reuse, R11, PT ; /* 0x0000000b0000720c */ /* 0x040fe40003f04070 */ /*04a0*/ ISETP.GT.U32.AND P1, PT, R0, R15, PT ; /* 0x0000000f0000720c */ /* 0x000fd60003f24070 */ /*04b0*/ @!P0 IMAD.IADD R11, R11, 0x1, -R0.reuse ; /* 0x000000010b0b8824 */ /* 0x100fe200078e0a00 */ /*04c0*/ ISETP.GE.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */ /* 0x000fe20003f06270 */ /*04d0*/ @!P1 IMAD.IADD R15, R15, 0x1, -R0 ; /* 0x000000010f0f9824 */ /* 0x000fe200078e0a00 */ /*04e0*/ ISETP.GE.AND P1, PT, R13, RZ, PT ; /* 0x000000ff0d00720c */ /* 0x000fe40003f26270 */ /*04f0*/ ISETP.GT.U32.AND P2, PT, R0.reuse, R11, PT ; /* 0x0000000b0000720c */ /* 0x040fe40003f44070 */ /*0500*/ ISETP.GT.U32.AND P3, PT, R0, R15, PT ; /* 0x0000000f0000720c */ /* 0x000fd60003f64070 */ /*0510*/ @!P2 IMAD.IADD R11, R11, 0x1, -R0.reuse ; /* 0x000000010b0ba824 */ /* 0x100fe200078e0a00 */ /*0520*/ ISETP.NE.AND P2, PT, RZ, c[0x0][0x16c], PT ; /* 0x00005b00ff007a0c */ /* 0x000fe20003f45270 */ /*0530*/ @!P3 IMAD.IADD R15, R15, 0x1, -R0 ; /* 0x000000010f0fb824 */ /* 0x000fe200078e0a00 */ /*0540*/ LOP3.LUT R0, RZ, c[0x0][0x16c], RZ, 0x33, !PT ; /* 0x00005b00ff007a12 */ /* 0x000fe200078e33ff */ /*0550*/ @!P0 IMAD.MOV R11, RZ, RZ, -R11 ; /* 0x000000ffff0b8224 */ /* 0x000fe400078e0a0b */ /*0560*/ @!P1 IMAD.MOV R15, RZ, RZ, -R15 ; /* 0x000000ffff0f9224 */ /* 0x000fc600078e0a0f */ /*0570*/ SEL R17, R0.reuse, R11, !P2 ; /* 0x0000000b00117207 */ 
/* 0x040fe40005000000 */ /*0580*/ SHF.R.S32.HI R11, RZ, 0x1f, R9 ; /* 0x0000001fff0b7819 */ /* 0x000fe40000011409 */ /*0590*/ SHF.R.S32.HI R13, RZ, 0x1f, R17 ; /* 0x0000001fff0d7819 */ /* 0x000fe40000011411 */ /*05a0*/ SEL R23, R0, R15, !P2 ; /* 0x0000000f00177207 */ /* 0x000fc80005000000 */ /*05b0*/ SHF.R.S32.HI R19, RZ, 0x1f, R23 ; /* 0x0000001fff137819 */ /* 0x000fe40000011417 */ /*05c0*/ IADD3 R26, P0, R17, R6.reuse, RZ ; /* 0x00000006111a7210 */ /* 0x084fe40007f1e0ff */ /*05d0*/ IADD3 R24, P1, R9, R6.reuse, RZ ; /* 0x0000000609187210 */ /* 0x080fe40007f3e0ff */ /*05e0*/ IADD3 R6, P2, R23, R6, RZ ; /* 0x0000000617067210 */ /* 0x000fe20007f5e0ff */ /*05f0*/ IMAD.X R27, R7, 0x1, R13, P0 ; /* 0x00000001071b7824 */ /* 0x000fe200000e060d */ /*0600*/ IADD3 R20, P0, R17, R2, RZ ; /* 0x0000000211147210 */ /* 0x008fe20007f1e0ff */ /*0610*/ IMAD.X R25, R7, 0x1, R11, P1 ; /* 0x0000000107197824 */ /* 0x000fc600008e060b */ /*0620*/ LDG.E.S8 R0, [R26.64] ; /* 0x000000041a007981 */ /* 0x000ea2000c1e1300 */ /*0630*/ IMAD.X R7, R7, 0x1, R19, P2 ; /* 0x0000000107077824 */ /* 0x000fe400010e0613 */ /*0640*/ IMAD.X R21, R3, 0x1, R13, P0 ; /* 0x0000000103157824 */ /* 0x000fe200000e060d */ /*0650*/ IADD3 R14, P0, R17, R4.reuse, RZ ; /* 0x00000004110e7210 */ /* 0x090fe20007f1e0ff */ /*0660*/ LDG.E.S8 R8, [R24.64] ; /* 0x0000000418087981 */ /* 0x000ee2000c1e1300 */ /*0670*/ IADD3 R12, P1, R9, R4, RZ ; /* 0x00000004090c7210 */ /* 0x000fc60007f3e0ff */ /*0680*/ IMAD.X R15, R5, 0x1, R13, P0 ; /* 0x00000001050f7824 */ /* 0x000fe200000e060d */ /*0690*/ IADD3 R16, P0, R23, R2, RZ ; /* 0x0000000217107210 */ /* 0x000fe20007f1e0ff */ /*06a0*/ IMAD.X R13, R5, 0x1, R11, P1 ; /* 0x00000001050d7824 */ /* 0x000fe200008e060b */ /*06b0*/ LDG.E.S8 R21, [R20.64] ; /* 0x0000000414157981 */ /* 0x000ea6000c1e1300 */ /*06c0*/ IMAD.X R17, R3, 0x1, R19, P0 ; /* 0x0000000103117824 */ /* 0x000fe200000e0613 */ /*06d0*/ LDG.E.S8 R14, [R14.64] ; /* 0x000000040e0e7981 */ /* 0x000ea2000c1e1300 */ /*06e0*/ IADD3 R4, 
P0, R23, R4, RZ ; /* 0x0000000417047210 */ /* 0x000fc60007f1e0ff */ /*06f0*/ LDG.E.S8 R12, [R12.64] ; /* 0x000000040c0c7981 */ /* 0x000ee4000c1e1300 */ /*0700*/ IMAD.X R5, R5, 0x1, R19, P0 ; /* 0x0000000105057824 */ /* 0x000fe400000e0613 */ /*0710*/ LDG.E.S8 R7, [R6.64] ; /* 0x0000000406077981 */ /* 0x000f28000c1e1300 */ /*0720*/ LDG.E.S8 R16, [R16.64] ; /* 0x0000000410107981 */ /* 0x000f28000c1e1300 */ /*0730*/ LDG.E.S8 R5, [R4.64] ; /* 0x0000000404057981 */ /* 0x000f62000c1e1300 */ /*0740*/ IADD3 R2, P1, R9, R2, RZ ; /* 0x0000000209027210 */ /* 0x000fe20007f3e0ff */ /*0750*/ BSSY B0, 0x860 ; /* 0x0000010000007945 */ /* 0x000fe80003800000 */ /*0760*/ IMAD.X R3, R3, 0x1, R11, P1 ; /* 0x0000000103037824 */ /* 0x000fe200008e060b */ /*0770*/ IADD3 R21, R14, R21, R0 ; /* 0x000000150e157210 */ /* 0x004fc80007ffe000 */ /*0780*/ IADD3 R8, R12, R21, R8 ; /* 0x000000150c087210 */ /* 0x008fc80007ffe008 */ /*0790*/ IADD3 R8, R16, R8, R7 ; /* 0x0000000810087210 */ /* 0x010fca0007ffe007 */ /*07a0*/ IMAD.IADD R8, R8, 0x1, R5 ; /* 0x0000000108087824 */ /* 0x020fca00078e0205 */ /*07b0*/ ISETP.NE.AND P0, PT, R8, 0x3, PT ; /* 0x000000030800780c */ /* 0x000fe20003f05270 */ /*07c0*/ IMAD.MOV.U32 R0, RZ, RZ, 0x1 ; /* 0x00000001ff007424 */ /* 0x000fd800078e00ff */ /*07d0*/ @!P0 BRA 0x850 ; /* 0x0000007000008947 */ /* 0x000fea0003800000 */ /*07e0*/ ISETP.NE.AND P0, PT, R8, 0x2, PT ; /* 0x000000020800780c */ /* 0x000fda0003f05270 */ /*07f0*/ @P0 BRA 0x840 ; /* 0x0000004000000947 */ /* 0x000fea0003800000 */ /*0800*/ LDG.E.U8 R0, [R2.64] ; /* 0x0000000402007981 */ /* 0x000ea4000c1e1100 */ /*0810*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */ /* 0x004fc80003f05270 */ /*0820*/ SEL R0, RZ, 0x1, !P0 ; /* 0x00000001ff007807 */ /* 0x000fe20004000000 */ /*0830*/ BRA 0x850 ; /* 0x0000001000007947 */ /* 0x000fea0003800000 */ /*0840*/ PRMT R0, RZ, 0x7610, R0 ; /* 0x00007610ff007816 */ /* 0x000fe40000000000 */ /*0850*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0860*/ 
STG.E.U8 [R2.64], R0 ; /* 0x0000000002007986 */ /* 0x000fe2000c101104 */ /*0870*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0880*/ BRA 0x880; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0890*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0900*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0910*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0920*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0930*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0940*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0950*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0960*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0970*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected lifeStep .globl lifeStep .p2align 8 .type lifeStep,@function lifeStep: s_clause 0x1 s_load_b64 s[6:7], s[0:1], 0x8 s_load_b32 s2, s[0:1], 0x1c v_bfe_u32 v4, v0, 10, 10 s_load_b64 s[0:1], s[0:1], 0x0 s_waitcnt lgkmcnt(0) s_ashr_i32 s3, s6, 31 s_ashr_i32 s5, s7, 31 s_add_i32 s4, s6, s3 s_delay_alu instid0(SALU_CYCLE_1) s_xor_b32 s4, s4, s3 s_add_i32 s3, s7, s5 v_cvt_f32_u32_e32 v1, s4 s_xor_b32 s3, s3, s5 s_and_b32 s5, s2, 0xffff v_cvt_f32_u32_e32 v2, s3 s_lshr_b32 s2, s2, 16 v_rcp_iflag_f32_e32 v5, v1 v_and_b32_e32 v1, 0x3ff, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_rcp_iflag_f32_e32 v6, v2 v_mad_u64_u32 v[2:3], null, s14, s5, v[1:2] s_sub_i32 s5, 0, s3 s_waitcnt_depctr 0xfff v_mul_f32_e32 v3, 0x4f7ffffe, v5 v_mad_u64_u32 v[0:1], null, s15, s2, v[4:5] v_mul_f32_e32 v1, 0x4f7ffffe, v6 v_add_nc_u32_e32 v4, 1, v2 s_delay_alu instid0(VALU_DEP_4) v_cvt_u32_f32_e32 v3, v3 s_sub_i32 s2, 0, s4 v_add3_u32 v6, s6, -1, v2 v_cvt_u32_f32_e32 v5, v1 v_add3_u32 v8, s7, -1, v0 v_mul_lo_u32 v7, s2, v3 v_add_nc_u32_e32 v9, 1, v0 v_ashrrev_i32_e32 v14, 31, v6 v_mul_lo_u32 v12, s5, v5 v_ashrrev_i32_e32 v1, 31, v4 v_ashrrev_i32_e32 v10, 31, v8 v_ashrrev_i32_e32 v11, 31, v9 v_add_nc_u32_e32 v6, v6, v14 v_mul_hi_u32 v7, v3, v7 v_add_nc_u32_e32 v4, v4, v1 v_add_nc_u32_e32 v8, v8, v10 v_mul_hi_u32 v17, v5, v12 v_add_nc_u32_e32 v9, v9, v11 v_xor_b32_e32 v16, v6, v14 v_xor_b32_e32 v15, v4, v1 v_xor_b32_e32 v12, v8, v10 v_add_nc_u32_e32 v3, v3, v7 v_xor_b32_e32 v13, v9, v11 s_mov_b32 s2, 0 v_add_nc_u32_e32 v17, v5, v17 s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_mad_u64_u32 v[8:9], null, v16, v3, 0 v_mad_u64_u32 v[6:7], null, v15, v3, 0 v_mad_u64_u32 v[3:4], null, v12, v17, 0 v_mad_u64_u32 v[5:6], null, v13, v17, 0 s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) v_mul_lo_u32 v3, v9, s4 v_mul_lo_u32 v5, v7, s4 s_delay_alu instid0(VALU_DEP_2) | 
instskip(NEXT) | instid1(VALU_DEP_2) v_sub_nc_u32_e32 v3, v16, v3 v_sub_nc_u32_e32 v5, v15, v5 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_subrev_nc_u32_e32 v7, s4, v3 v_cmp_le_u32_e32 vcc_lo, s4, v3 v_subrev_nc_u32_e32 v8, s4, v5 s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_cndmask_b32_e32 v3, v3, v7, vcc_lo v_cmp_le_u32_e32 vcc_lo, s4, v5 v_subrev_nc_u32_e32 v7, s4, v3 s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_cndmask_b32_e32 v5, v5, v8, vcc_lo v_cmp_le_u32_e32 vcc_lo, s4, v3 v_subrev_nc_u32_e32 v8, s4, v5 s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_cndmask_b32_e32 v3, v3, v7, vcc_lo v_cmp_le_u32_e32 vcc_lo, s4, v5 v_xor_b32_e32 v3, v3, v14 s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2) v_cndmask_b32_e32 v5, v5, v8, vcc_lo v_sub_nc_u32_e32 v7, v3, v14 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_xor_b32_e32 v5, v5, v1 v_ashrrev_i32_e32 v3, 31, v2 v_ashrrev_i32_e32 v8, 31, v7 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_sub_nc_u32_e32 v14, v5, v1 v_lshlrev_b64 v[1:2], 3, v[2:3] v_mul_lo_u32 v3, v4, s3 v_mul_lo_u32 v4, v6, s3 v_lshlrev_b64 v[7:8], 3, v[7:8] v_ashrrev_i32_e32 v15, 31, v14 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) v_lshlrev_b64 v[14:15], 3, v[14:15] v_add_co_u32 v7, vcc_lo, s0, v7 s_delay_alu instid0(VALU_DEP_4) v_add_co_ci_u32_e32 v8, vcc_lo, s1, v8, vcc_lo v_add_co_u32 v1, vcc_lo, s0, v1 v_add_co_ci_u32_e32 v2, vcc_lo, s1, v2, vcc_lo v_add_co_u32 v14, vcc_lo, s0, v14 global_load_b64 v[7:8], v[7:8], off v_add_co_ci_u32_e32 v15, vcc_lo, s1, v15, vcc_lo s_clause 0x1 global_load_b64 v[1:2], v[1:2], off global_load_b64 v[14:15], v[14:15], off v_sub_nc_u32_e32 v3, v12, v3 v_sub_nc_u32_e32 v4, v13, v4 s_mov_b32 s1, exec_lo s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_subrev_nc_u32_e32 v5, s3, 
v3 v_cmp_le_u32_e32 vcc_lo, s3, v3 v_subrev_nc_u32_e32 v6, s3, v4 s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_cndmask_b32_e32 v3, v3, v5, vcc_lo v_cmp_le_u32_e32 vcc_lo, s3, v4 v_subrev_nc_u32_e32 v5, s3, v3 s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_cndmask_b32_e32 v4, v4, v6, vcc_lo v_cmp_le_u32_e32 vcc_lo, s3, v3 v_subrev_nc_u32_e32 v6, s3, v4 s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_cndmask_b32_e32 v3, v3, v5, vcc_lo v_cmp_le_u32_e32 vcc_lo, s3, v4 v_xor_b32_e32 v3, v3, v10 s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2) v_cndmask_b32_e32 v4, v4, v6, vcc_lo v_sub_nc_u32_e32 v6, v3, v10 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_xor_b32_e32 v4, v4, v11 v_ashrrev_i32_e32 v3, 31, v0 v_ashrrev_i32_e32 v12, 31, v6 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) v_sub_nc_u32_e32 v13, v4, v11 v_ashrrev_i32_e32 v20, 31, v13 s_waitcnt vmcnt(2) v_add_co_u32 v4, vcc_lo, v7, v6 v_add_co_ci_u32_e32 v5, vcc_lo, v8, v12, vcc_lo s_waitcnt vmcnt(1) v_add_co_u32 v9, vcc_lo, v1, v6 v_add_co_ci_u32_e32 v10, vcc_lo, v2, v12, vcc_lo s_waitcnt vmcnt(0) v_add_co_u32 v11, vcc_lo, v14, v6 v_add_co_ci_u32_e32 v12, vcc_lo, v15, v12, vcc_lo v_add_co_u32 v16, vcc_lo, v7, v0 v_add_co_ci_u32_e32 v17, vcc_lo, v8, v3, vcc_lo v_add_co_u32 v18, vcc_lo, v14, v0 v_add_co_ci_u32_e32 v19, vcc_lo, v15, v3, vcc_lo v_add_co_u32 v6, vcc_lo, v7, v13 v_add_co_ci_u32_e32 v7, vcc_lo, v8, v20, vcc_lo flat_load_i8 v8, v[4:5] flat_load_i8 v9, v[9:10] flat_load_i8 v10, v[11:12] flat_load_i8 v11, v[16:17] flat_load_i8 v12, v[18:19] flat_load_i8 v16, v[6:7] v_add_co_u32 v4, vcc_lo, v1, v13 v_add_co_ci_u32_e32 v5, vcc_lo, v2, v20, vcc_lo v_add_co_u32 v6, vcc_lo, v14, v13 v_add_co_ci_u32_e32 v7, vcc_lo, v15, v20, vcc_lo flat_load_i8 v4, v[4:5] flat_load_i8 v5, v[6:7] s_waitcnt vmcnt(6) lgkmcnt(6) v_add_nc_u32_e32 v6, v9, v8 s_waitcnt vmcnt(4) 
lgkmcnt(4) s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_add3_u32 v6, v6, v10, v11 s_waitcnt vmcnt(2) lgkmcnt(2) v_add3_u32 v6, v6, v12, v16 s_waitcnt vmcnt(0) lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add3_u32 v4, v6, v4, v5 v_cmpx_lt_i32_e32 2, v4 s_xor_b32 s1, exec_lo, s1 v_cmp_ne_u32_e32 vcc_lo, 3, v4 s_mov_b32 s0, -1 s_and_b32 s2, vcc_lo, exec_lo s_or_saveexec_b32 s3, s1 s_mov_b32 s1, 0 s_xor_b32 exec_lo, exec_lo, s3 v_cmp_ne_u32_e32 vcc_lo, 2, v4 s_and_not1_b32 s2, s2, exec_lo s_mov_b32 s1, exec_lo s_and_b32 s4, vcc_lo, exec_lo s_delay_alu instid0(SALU_CYCLE_1) s_or_b32 s2, s2, s4 s_or_b32 exec_lo, exec_lo, s3 s_and_saveexec_b32 s3, s2 s_delay_alu instid0(SALU_CYCLE_1) s_xor_b32 s2, exec_lo, s3 s_and_not1_b32 s0, s0, exec_lo s_and_not1_b32 s1, s1, exec_lo s_or_b32 exec_lo, exec_lo, s2 s_and_saveexec_b32 s2, s1 s_cbranch_execz .LBB0_8 v_add_co_u32 v4, vcc_lo, v1, v0 v_add_co_ci_u32_e32 v5, vcc_lo, v2, v3, vcc_lo s_and_not1_b32 s0, s0, exec_lo flat_load_u8 v4, v[4:5] s_waitcnt vmcnt(0) lgkmcnt(0) v_cmp_ne_u16_e32 vcc_lo, 0, v4 s_and_b32 s1, vcc_lo, exec_lo s_delay_alu instid0(SALU_CYCLE_1) s_or_b32 s0, s0, s1 .LBB0_8: s_or_b32 exec_lo, exec_lo, s2 v_add_co_u32 v0, vcc_lo, v1, v0 v_cndmask_b32_e64 v4, 0, 1, s0 v_add_co_ci_u32_e32 v1, vcc_lo, v2, v3, vcc_lo flat_store_b8 v[0:1], v4 s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel lifeStep .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 
.amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 21 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size lifeStep, .Lfunc_end0-lifeStep .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .offset: 12 .size: 4 .value_kind: by_value - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - .offset: 72 .size: 8 
.value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: lifeStep .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: lifeStep.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 21 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_000bd2df_00000000-6_LifeStep.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z30__device_stub__Z8lifeStepPPciiPPcii .type _Z30__device_stub__Z8lifeStepPPciiPPcii, @function _Z30__device_stub__Z8lifeStepPPciiPPcii: .LFB2051: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movl %esi, 4(%rsp) movl %edx, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) leaq 4(%rsp), %rax movq %rax, 88(%rsp) movq %rsp, %rax movq %rax, 96(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 104(%rsp), %rax subq %fs:40, %rax jne .L8 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq lifeStep(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z30__device_stub__Z8lifeStepPPciiPPcii, .-_Z30__device_stub__Z8lifeStepPPciiPPcii .globl lifeStep .type lifeStep, @function lifeStep: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z30__device_stub__Z8lifeStepPPciiPPcii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size lifeStep, .-lifeStep .section .rodata.str1.1,"aMS",@progbits,1 .LC0: 
.string "lifeStep" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq lifeStep(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "LifeStep.hip" .globl __device_stub__lifeStep # -- Begin function __device_stub__lifeStep .p2align 4, 0x90 .type __device_stub__lifeStep,@function __device_stub__lifeStep: # @__device_stub__lifeStep .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movl %esi, 4(%rsp) movl %edx, (%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 4(%rsp), %rax movq %rax, 72(%rsp) movq %rsp, %rax movq %rax, 80(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 64(%rsp), %r9 movl $lifeStep, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end0: .size __device_stub__lifeStep, .Lfunc_end0-__device_stub__lifeStep .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $lifeStep, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq 
__hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type lifeStep,@object # @lifeStep .section .rodata,"a",@progbits .globl lifeStep .p2align 3, 0x0 lifeStep: .quad __device_stub__lifeStep .size lifeStep, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "lifeStep" .size .L__unnamed_1, 9 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __device_stub__lifeStep .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym lifeStep .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
/// ================================================================ /// /// Disclaimer: IMPORTANT: This software was developed at theNT /// National Institute of Standards and Technology by employees of the /// Federal Government in the course of their official duties. /// Pursuant to title 17 Section 105 of the United States Code this /// software is not subject to copyright protection and is in the /// public domain. This is an experimental system. NIST assumes no /// responsibility whatsoever for its use by other parties, and makes /// no guarantees, expressed or implied, about its quality, /// reliability, or any other characteristic. We would appreciate /// acknowledgement if the software is used. This software can be /// redistributed and/or modified freely provided that any derivative /// works bear some notice that they are derived from it, and any /// modified versions bear some notice that they have been modified. /// /// ================================================================ // ================================================================ // // Author: Timothy Blattner // Date: Wed Nov 30 12:36:40 2011 EScufftDoubleComplex // // Functions that execute on the graphics card for doing // Vector computation. 
//
// ================================================================
// CUDA kernels for phase-correlation image stitching:
//   * element-wise normalized cross-power spectrum (double + float)
//   * block-wide max reductions that also track the argmax index,
//     optionally filtering out previously recorded maxima.
//
// All kernels are extern "C" so they can be located by name (e.g. via
// the driver API); kernel names and signatures must not change.
// The shared reduction buffers are sized THREADS_PER_BLOCK, so
// blockSize must be <= THREADS_PER_BLOCK (the blockSize >= 512
// branches in the reduction helpers are dead for this configuration).
// ================================================================
#include <cuda.h>
#include <cufft.h>
#include <float.h>

#define THREADS_PER_BLOCK 256
#define MIN_DISTANCE 1.0

// ================================================================

// Squared Euclidean distance between (x1,y1) and (x2,y2).
// Currently unused: checkDistance() compares coordinates exactly.
__device__ double distance(int x1, int x2, int y1, int y2) {
  return (double(x1 - x2)) * (double(x1 - x2)) +
         (double(y1 - y2)) * (double(y1 - y2));
}

// Returns false when flattened index curIdx coincides exactly with one
// of the nMax previously recorded maxima (row/col pairs); true otherwise.
__device__ bool checkDistance(int *maxesRow, int *maxesCol, int nMax,
                              int curIdx, int width) {
  int row = curIdx / width;
  int col = curIdx % width;
  for (int j = 0; j < nMax; j++) {
    if (maxesRow[j] == row && maxesCol[j] == col)
      return false;
  }
  return true;
}

// volatile overload used during the warp-synchronous reduction phase.
__device__ bool checkDistance(volatile int *maxesRow, volatile int *maxesCol,
                              int nMax, int curIdx, int width) {
  int row = curIdx / width;
  int col = curIdx % width;
  for (int j = 0; j < nMax; j++) {
    if (maxesRow[j] == row && maxesCol[j] == col)
      return false;
  }
  return true;
}

// ----------------------------------------------------------------
// Shared-memory argmax tree reduction (T is float or double).
// On entry sdata[tid] == myMax and idxData[tid] holds its index; on
// return sdata[0]/idxData[0] hold the block-wide maximum and index.
// Replaces eight hand-unrolled copies of the same reduction.
template <typename T>
__device__ void blockArgMax(T *sdata, int *idxData, T myMax,
                            unsigned int tid, int blockSize) {
  if (blockSize >= 512) {
    if (tid < 256 && myMax < sdata[tid + 256]) {
      sdata[tid] = myMax = sdata[tid + 256];
      idxData[tid] = idxData[tid + 256];
    }
    __syncthreads();
  }
  if (blockSize >= 256) {
    if (tid < 128 && myMax < sdata[tid + 128]) {
      sdata[tid] = myMax = sdata[tid + 128];
      idxData[tid] = idxData[tid + 128];
    }
    __syncthreads();
  }
  if (blockSize >= 128) {
    if (tid < 64 && myMax < sdata[tid + 64]) {
      sdata[tid] = myMax = sdata[tid + 64];
      idxData[tid] = idxData[tid + 64];
    }
    __syncthreads();
  }
  // Warp-synchronous phase: volatile forces shared-memory re-reads.
  volatile T *vdata = sdata;
  volatile int *vidx = idxData;
  if (tid < 32) {
    if (blockSize >= 64 && myMax < vdata[tid + 32]) { vdata[tid] = myMax = vdata[tid + 32]; vidx[tid] = vidx[tid + 32]; }
    if (blockSize >= 32 && myMax < vdata[tid + 16]) { vdata[tid] = myMax = vdata[tid + 16]; vidx[tid] = vidx[tid + 16]; }
    if (blockSize >= 16 && myMax < vdata[tid +  8]) { vdata[tid] = myMax = vdata[tid +  8]; vidx[tid] = vidx[tid +  8]; }
    if (blockSize >=  8 && myMax < vdata[tid +  4]) { vdata[tid] = myMax = vdata[tid +  4]; vidx[tid] = vidx[tid +  4]; }
    if (blockSize >=  4 && myMax < vdata[tid +  2]) { vdata[tid] = myMax = vdata[tid +  2]; vidx[tid] = vidx[tid +  2]; }
    if (blockSize >=  2 && myMax < vdata[tid +  1]) { vdata[tid] = myMax = vdata[tid +  1]; vidx[tid] = vidx[tid +  1]; }
  }
  // Bug fix: this barrier used to sit inside the divergent tid<32 branch
  // (undefined behavior); it now executes in uniform control flow.
  __syncthreads();
}

// Filtered variant: a candidate only wins if it is not one of the nMax
// previously recorded maxima (see checkDistance).
template <typename T>
__device__ void blockArgMaxFiltered(T *sdata, int *idxData, T myMax,
                                    unsigned int tid, int blockSize,
                                    int *maxesRow, int *maxesCol,
                                    int nMax, int width) {
  if (blockSize >= 512) {
    if (tid < 256 && myMax < sdata[tid + 256] &&
        checkDistance(maxesRow, maxesCol, nMax, idxData[tid + 256], width)) {
      sdata[tid] = myMax = sdata[tid + 256];
      idxData[tid] = idxData[tid + 256];
    }
    __syncthreads();
  }
  if (blockSize >= 256) {
    if (tid < 128 && myMax < sdata[tid + 128] &&
        checkDistance(maxesRow, maxesCol, nMax, idxData[tid + 128], width)) {
      sdata[tid] = myMax = sdata[tid + 128];
      idxData[tid] = idxData[tid + 128];
    }
    __syncthreads();
  }
  if (blockSize >= 128) {
    if (tid < 64 && myMax < sdata[tid + 64] &&
        checkDistance(maxesRow, maxesCol, nMax, idxData[tid + 64], width)) {
      sdata[tid] = myMax = sdata[tid + 64];
      idxData[tid] = idxData[tid + 64];
    }
    __syncthreads();
  }
  volatile T *vdata = sdata;
  volatile int *vidx = idxData;
  volatile int *vRow = maxesRow;
  volatile int *vCol = maxesCol;
  if (tid < 32) {
    if (blockSize >= 64 && myMax < vdata[tid + 32] && checkDistance(vRow, vCol, nMax, vidx[tid + 32], width)) { vdata[tid] = myMax = vdata[tid + 32]; vidx[tid] = vidx[tid + 32]; }
    if (blockSize >= 32 && myMax < vdata[tid + 16] && checkDistance(vRow, vCol, nMax, vidx[tid + 16], width)) { vdata[tid] = myMax = vdata[tid + 16]; vidx[tid] = vidx[tid + 16]; }
    if (blockSize >= 16 && myMax < vdata[tid +  8] && checkDistance(vRow, vCol, nMax, vidx[tid +  8], width)) { vdata[tid] = myMax = vdata[tid +  8]; vidx[tid] = vidx[tid +  8]; }
    if (blockSize >=  8 && myMax < vdata[tid +  4] && checkDistance(vRow, vCol, nMax, vidx[tid +  4], width)) { vdata[tid] = myMax = vdata[tid +  4]; vidx[tid] = vidx[tid +  4]; }
    if (blockSize >=  4 && myMax < vdata[tid +  2] && checkDistance(vRow, vCol, nMax, vidx[tid +  2], width)) { vdata[tid] = myMax = vdata[tid +  2]; vidx[tid] = vidx[tid +  2]; }
    if (blockSize >=  2 && myMax < vdata[tid +  1] && checkDistance(vRow, vCol, nMax, vidx[tid +  1], width)) { vdata[tid] = myMax = vdata[tid +  1]; vidx[tid] = vidx[tid +  1]; }
  }
  // Bug fix: barrier moved out of the divergent tid<32 branch.
  __syncthreads();
}

// ----------------------------------------------------------------
// fc[i] = (c1[i] * conj(c2[i])) normalized to unit magnitude (double).
// One element per thread; blockDim.x is assumed == THREADS_PER_BLOCK.
extern "C" __global__ void elt_prod_conj(cufftDoubleComplex *fc,
                                         cufftDoubleComplex *c1,
                                         cufftDoubleComplex *c2, int size) {
  __shared__ cufftDoubleComplex sfc[THREADS_PER_BLOCK];
  __shared__ cufftDoubleComplex sc1[THREADS_PER_BLOCK];
  __shared__ cufftDoubleComplex sc2[THREADS_PER_BLOCK];

  int idx = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;

  // Bug fix: out-of-range threads used to return *before* __syncthreads()
  // (divergent barrier); they now reach the barrier and exit afterwards.
  if (idx < size) {
    sc1[threadIdx.x] = c1[idx];
    sc2[threadIdx.x] = c2[idx];
  }
  __syncthreads();
  if (idx >= size)
    return;

  sfc[threadIdx.x] = cuCmul(sc1[threadIdx.x], cuConj(sc2[threadIdx.x]));
  double mag = cuCabs(sfc[threadIdx.x]);
  if (mag == 0 || isnan(mag)) {
    // Avoid division by zero / NaN propagation; imaginary part is left
    // untouched, matching the original behavior.
    mag = DBL_EPSILON;
    sfc[threadIdx.x].x = DBL_EPSILON;
  }
  fc[idx] = make_cuDoubleComplex(cuCreal(sfc[threadIdx.x]) / mag,
                                 cuCimag(sfc[threadIdx.x]) / mag);
}

// v2: same normalization with a single shared buffer and an explicit
// component-wise magnitude (sqrt of sum of squares).
extern "C" __global__ void elt_prod_conj_v2(cufftDoubleComplex *fc,
                                            cufftDoubleComplex *c1,
                                            cufftDoubleComplex *c2,
                                            int size) {
  __shared__ cufftDoubleComplex sfc[THREADS_PER_BLOCK];

  int idx = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;

  // Bug fix: barrier is now reached by all threads of the block.
  if (idx < size)
    sfc[threadIdx.x] = cuCmul(c1[idx], cuConj(c2[idx]));
  __syncthreads();
  if (idx >= size)
    return;

  double mag = sqrt(sfc[threadIdx.x].x * sfc[threadIdx.x].x +
                    sfc[threadIdx.x].y * sfc[threadIdx.x].y);
  if (isnan(mag) || mag == 0) {
    mag = DBL_EPSILON;
    sfc[threadIdx.x].x = DBL_EPSILON;
  }
  fc[idx] = make_cuDoubleComplex(sfc[threadIdx.x].x / mag,
                                 sfc[threadIdx.x].y / mag);
}

// v3: register-only variant (no shared memory, no barrier needed).
extern "C" __global__ void elt_prod_conj_v3(cufftDoubleComplex *fc,
                                            cufftDoubleComplex *c1,
                                            cufftDoubleComplex *c2,
                                            int size) {
  int idx = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
  if (idx >= size)
    return;

  cufftDoubleComplex _c1 = c1[idx];
  cufftDoubleComplex _c2 = c2[idx];
  cufftDoubleComplex _fc = cuCmul(_c1, cuConj(_c2));
  double mag = sqrt(_fc.x * _fc.x + _fc.y * _fc.y);
  if (isnan(mag) || mag == 0)
    mag = cuCabs(_fc);  // fall back to the more robust library magnitude
  if (mag == 0)
    mag = DBL_EPSILON;
  fc[idx] = make_cuDoubleComplex(_fc.x / mag, _fc.y / mag);
}

// ----------------------------------------------------------------
// Final pass of the max reduction: g_idata/max_idx hold value/index
// pairs produced by an earlier pass.  Writes the per-block maximum and
// its index to g_odata[blockIdx.x] / max_idx[blockIdx.x].
extern "C" __global__ void reduce_max_final(double *g_idata, double *g_odata,
                                            int *max_idx, unsigned int n,
                                            int blockSize) {
  __shared__ double sdata[THREADS_PER_BLOCK];
  __shared__ int idxData[THREADS_PER_BLOCK];

  unsigned int tid = threadIdx.x;
  unsigned int i = blockIdx.x * (blockSize * 2) + tid;
  unsigned int gridSize = blockSize * 2 * gridDim.x;

  double myMax = 0.0;
  int myMaxIndex = -1;  // bug fix: was uninitialized when no element > 0

  while (i < n) {
    if (myMax < g_idata[i]) {
      myMax = g_idata[i];
      myMaxIndex = max_idx[i];
    }
    if (i + blockSize < n && myMax < g_idata[i + blockSize]) {
      myMax = g_idata[i + blockSize];
      myMaxIndex = max_idx[i + blockSize];
    }
    i += gridSize;
  }
  sdata[tid] = myMax;
  idxData[tid] = myMaxIndex;
  __syncthreads();

  blockArgMax(sdata, idxData, myMax, tid, blockSize);

  if (tid == 0) {
    g_odata[blockIdx.x] = sdata[0];
    max_idx[blockIdx.x] = idxData[0];
  }
}

// First pass of the max reduction over raw values: the recorded index is
// the flattened element position itself.
extern "C" __global__ void reduce_max_main(double *g_idata, double *g_odata,
                                           int *max_idx, unsigned int n,
                                           int blockSize) {
  __shared__ double sdata[THREADS_PER_BLOCK];
  __shared__ int idxData[THREADS_PER_BLOCK];

  unsigned int tid = threadIdx.x;
  unsigned int i = blockIdx.x * blockSize + tid;
  unsigned int gridSize = blockSize * gridDim.x;

  double myMax = 0.0;
  int myMaxIndex = -1;  // bug fix: was uninitialized when no element > 0
  double val;

  while (i < n) {
    val = g_idata[i];
    if (myMax < val) {
      myMax = val;
      myMaxIndex = i;
    }
    if (i + blockSize < n) {
      val = g_idata[i + blockSize];
      if (myMax < val) {
        myMax = val;
        myMaxIndex = i + blockSize;
      }
    }
    i += gridSize;
  }
  sdata[tid] = myMax;
  idxData[tid] = myMaxIndex;
  __syncthreads();

  blockArgMax(sdata, idxData, myMax, tid, blockSize);

  if (tid == 0) {
    g_odata[blockIdx.x] = sdata[0];
    max_idx[blockIdx.x] = idxData[0];
  }
}

// Final filtered pass: like reduce_max_final, but candidates that
// coincide with one of the nMax previously found maxima are skipped.
// When launched with a single block the winning index is appended to
// maxes[nMax].  nMax is assumed <= 10 (size of the shared caches).
extern "C" __global__ void reduce_max_filter_final(double *g_idata,
                                                   double *g_odata,
                                                   int *max_idx,
                                                   unsigned int n,
                                                   unsigned int width,
                                                   int blockSize,
                                                   int *maxes, int nMax) {
  __shared__ int smaxesRow[10];
  __shared__ int smaxesCol[10];
  __shared__ int smaxesVal[10];
  __shared__ double sdata[THREADS_PER_BLOCK];
  __shared__ int idxData[THREADS_PER_BLOCK];

  unsigned int tid = threadIdx.x;
  unsigned int i = blockIdx.x * (blockSize * 2) + tid;
  unsigned int gridSize = blockSize * 2 * gridDim.x;

  // Cache the previously found maxima as row/col pairs.
  if (tid < nMax) {
    smaxesVal[tid] = maxes[tid];
    smaxesRow[tid] = smaxesVal[tid] / width;
    smaxesCol[tid] = smaxesVal[tid] % width;
  }
  __syncthreads();

  double myMax = 0.0;
  int myMaxIndex = -1;  // bug fix: was uninitialized

  while (i < n) {
    if (myMax < g_idata[i] &&
        checkDistance(smaxesRow, smaxesCol, nMax, max_idx[i], width)) {
      myMax = g_idata[i];
      myMaxIndex = max_idx[i];
    }
    if (i + blockSize < n && myMax < g_idata[i + blockSize] &&
        checkDistance(smaxesRow, smaxesCol, nMax, max_idx[i + blockSize],
                      width)) {
      myMax = g_idata[i + blockSize];
      myMaxIndex = max_idx[i + blockSize];
    }
    i += gridSize;
  }
  sdata[tid] = myMax;
  idxData[tid] = myMaxIndex;
  __syncthreads();

  blockArgMaxFiltered(sdata, idxData, myMax, tid, blockSize, smaxesRow,
                      smaxesCol, nMax, width);

  if (tid == 0) {
    g_odata[blockIdx.x] = sdata[0];
    max_idx[blockIdx.x] = idxData[0];
    if (gridDim.x == 1)
      maxes[nMax] = idxData[0];
  }
}

// First filtered pass over a width x height image.  Unlike the final
// pass this starts from -INFINITY, so negative values can win.
extern "C" __global__ void reduce_max_filter_main(double *g_idata,
                                                  double *g_odata,
                                                  int *max_idx,
                                                  unsigned int width,
                                                  unsigned int height,
                                                  int blockSize,
                                                  int *maxes, int nMax) {
  __shared__ int smaxesRow[10];
  __shared__ int smaxesCol[10];
  __shared__ int smaxesVal[10];
  __shared__ double sdata[THREADS_PER_BLOCK];
  __shared__ int idxData[THREADS_PER_BLOCK];

  unsigned int tid = threadIdx.x;
  unsigned int i = blockIdx.x * blockSize + tid;
  unsigned int gridSize = blockSize * gridDim.x;

  if (tid < nMax) {
    smaxesVal[tid] = maxes[tid];
    smaxesRow[tid] = smaxesVal[tid] / width;
    smaxesCol[tid] = smaxesVal[tid] % width;
  }
  __syncthreads();

  double myMax = -INFINITY;
  int myMaxIndex = -1;  // bug fix: was uninitialized
  double val;

  while (i < width * height) {
    val = g_idata[i];
    if (myMax < val &&
        checkDistance(smaxesRow, smaxesCol, nMax, i, width)) {
      myMax = val;
      myMaxIndex = i;
    }
    if (i + blockSize < width * height) {
      val = g_idata[i + blockSize];
      if (myMax < val &&
          checkDistance(smaxesRow, smaxesCol, nMax, i + blockSize, width)) {
        myMax = val;
        myMaxIndex = i + blockSize;
      }
    }
    i += gridSize;
  }
  sdata[tid] = myMax;
  idxData[tid] = myMaxIndex;
  __syncthreads();

  blockArgMaxFiltered(sdata, idxData, myMax, tid, blockSize, smaxesRow,
                      smaxesCol, nMax, width);

  if (tid == 0) {
    g_odata[blockIdx.x] = sdata[0];
    max_idx[blockIdx.x] = idxData[0];
    if (gridDim.x == 1)
      maxes[nMax] = idxData[0];
  }
}

// ================================================================
// ======================= Float versions =========================
// ================================================================

// Single-precision counterpart of distance(); also currently unused.
__device__ float distancef(int x1, int x2, int y1, int y2) {
  return (float(x1 - x2)) * (float(x1 - x2)) +
         (float(y1 - y2)) * (float(y1 - y2));
}

// Kept for compatibility; identical logic to checkDistance (the shared
// reduction helpers use checkDistance for both precisions, since the
// test involves only ints).
__device__ bool checkDistancef(int *maxesRow, int *maxesCol, int nMax,
                               int curIdx, int width) {
  int row = curIdx / width;
  int col = curIdx % width;
  for (int j = 0; j < nMax; j++) {
    if (maxesRow[j] == row && maxesCol[j] == col)
      return false;
  }
  return true;
}

// volatile overload, kept for compatibility.
__device__ bool checkDistancef(volatile int *maxesRow, volatile int *maxesCol,
                               int nMax, int curIdx, int width) {
  int row = curIdx / width;
  int col = curIdx % width;
  for (int j = 0; j < nMax; j++) {
    if (maxesRow[j] == row && maxesCol[j] == col)
      return false;
  }
  return true;
}

// Single-precision normalized cross-power spectrum; see elt_prod_conj.
extern "C" __global__ void elt_prod_conjf(cufftComplex *fc, cufftComplex *c1,
                                          cufftComplex *c2, int size) {
  __shared__ cufftComplex sfc[THREADS_PER_BLOCK];
  __shared__ cufftComplex sc1[THREADS_PER_BLOCK];
  __shared__ cufftComplex sc2[THREADS_PER_BLOCK];

  int idx = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;

  // Bug fix: barrier is now reached by all threads of the block.
  if (idx < size) {
    sc1[threadIdx.x] = c1[idx];
    sc2[threadIdx.x] = c2[idx];
  }
  __syncthreads();
  if (idx >= size)
    return;

  sfc[threadIdx.x] = cuCmulf(sc1[threadIdx.x], cuConjf(sc2[threadIdx.x]));
  float mag = cuCabsf(sfc[threadIdx.x]);
  if (mag == 0 || isnan(mag)) {
    mag = FLT_EPSILON;
    sfc[threadIdx.x].x = FLT_EPSILON;
  }
  fc[idx] = make_cuComplex(cuCrealf(sfc[threadIdx.x]) / mag,
                           cuCimagf(sfc[threadIdx.x]) / mag);
}

// Single-precision v2; see elt_prod_conj_v2.
extern "C" __global__ void elt_prod_conj_v2f(cufftComplex *fc,
                                             cufftComplex *c1,
                                             cufftComplex *c2, int size) {
  __shared__ cufftComplex sfc[THREADS_PER_BLOCK];

  int idx = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;

  // Bug fix: barrier is now reached by all threads of the block.
  if (idx < size)
    sfc[threadIdx.x] = cuCmulf(c1[idx], cuConjf(c2[idx]));
  __syncthreads();
  if (idx >= size)
    return;

  float mag = sqrtf(sfc[threadIdx.x].x * sfc[threadIdx.x].x +
                    sfc[threadIdx.x].y * sfc[threadIdx.x].y);
  if (isnan(mag) || mag == 0) {
    mag = FLT_EPSILON;
    sfc[threadIdx.x].x = FLT_EPSILON;
  }
  fc[idx] = make_cuComplex(sfc[threadIdx.x].x / mag,
                           sfc[threadIdx.x].y / mag);
}

// Single-precision v3; see elt_prod_conj_v3.
extern "C" __global__ void elt_prod_conj_v3f(cufftComplex *fc,
                                             cufftComplex *c1,
                                             cufftComplex *c2, int size) {
  int idx = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
  if (idx >= size)
    return;

  cufftComplex _c1 = c1[idx];
  cufftComplex _c2 = c2[idx];
  cufftComplex _fc = cuCmulf(_c1, cuConjf(_c2));
  float mag = sqrtf(_fc.x * _fc.x + _fc.y * _fc.y);
  if (isnan(mag) || mag == 0)
    mag = cuCabsf(_fc);
  if (mag == 0)
    mag = FLT_EPSILON;
  fc[idx] = make_cuComplex(_fc.x / mag, _fc.y / mag);
}

// Single-precision final reduction pass; see reduce_max_final.
extern "C" __global__ void reduce_max_finalf(float *g_idata, float *g_odata,
                                             int *max_idx, unsigned int n,
                                             int blockSize) {
  __shared__ float sdata[THREADS_PER_BLOCK];
  __shared__ int idxData[THREADS_PER_BLOCK];

  unsigned int tid = threadIdx.x;
  unsigned int i = blockIdx.x * (blockSize * 2) + tid;
  unsigned int gridSize = blockSize * 2 * gridDim.x;

  float myMax = 0.0;
  int myMaxIndex = -1;  // bug fix: was uninitialized when no element > 0

  while (i < n) {
    if (myMax < g_idata[i]) {
      myMax = g_idata[i];
      myMaxIndex = max_idx[i];
    }
    if (i + blockSize < n && myMax < g_idata[i + blockSize]) {
      myMax = g_idata[i + blockSize];
      myMaxIndex = max_idx[i + blockSize];
    }
    i += gridSize;
  }
  sdata[tid] = myMax;
  idxData[tid] = myMaxIndex;
  __syncthreads();

  blockArgMax(sdata, idxData, myMax, tid, blockSize);

  if (tid == 0) {
    g_odata[blockIdx.x] = sdata[0];
    max_idx[blockIdx.x] = idxData[0];
  }
}

// Single-precision first reduction pass; see reduce_max_main.
extern "C" __global__ void reduce_max_mainf(float *g_idata, float *g_odata,
                                            int *max_idx, unsigned int n,
                                            int blockSize) {
  __shared__ float sdata[THREADS_PER_BLOCK];
  __shared__ int idxData[THREADS_PER_BLOCK];

  unsigned int tid = threadIdx.x;
  unsigned int i = blockIdx.x * blockSize + tid;
  unsigned int gridSize = blockSize * gridDim.x;

  float myMax = 0.0;
  int myMaxIndex = -1;  // bug fix: was uninitialized when no element > 0
  float val;

  while (i < n) {
    val = g_idata[i];
    if (myMax < val) {
      myMax = val;
      myMaxIndex = i;
    }
    if (i + blockSize < n) {
      val = g_idata[i + blockSize];
      if (myMax < val) {
        myMax = val;
        myMaxIndex = i + blockSize;
      }
    }
    i += gridSize;
  }
  sdata[tid] = myMax;
  idxData[tid] = myMaxIndex;
  __syncthreads();

  blockArgMax(sdata, idxData, myMax, tid, blockSize);

  if (tid == 0) {
    g_odata[blockIdx.x] = sdata[0];
    max_idx[blockIdx.x] = idxData[0];
  }
}

// Single-precision filtered final pass; see reduce_max_filter_final.
extern "C" __global__ void reduce_max_filter_finalf(float *g_idata,
                                                    float *g_odata,
                                                    int *max_idx,
                                                    unsigned int n,
                                                    unsigned int width,
                                                    int blockSize,
                                                    int *maxes, int nMax) {
  __shared__ int smaxesRow[10];
  __shared__ int smaxesCol[10];
  __shared__ int smaxesVal[10];
  __shared__ float sdata[THREADS_PER_BLOCK];
  __shared__ int idxData[THREADS_PER_BLOCK];

  unsigned int tid = threadIdx.x;
  unsigned int i = blockIdx.x * (blockSize * 2) + tid;
  unsigned int gridSize = blockSize * 2 * gridDim.x;

  if (tid < nMax) {
    smaxesVal[tid] = maxes[tid];
    smaxesRow[tid] = smaxesVal[tid] / width;
    smaxesCol[tid] = smaxesVal[tid] % width;
  }
  __syncthreads();

  float myMax = 0.0;
  int myMaxIndex = -1;  // bug fix: was uninitialized

  while (i < n) {
    if (myMax < g_idata[i] &&
        checkDistancef(smaxesRow, smaxesCol, nMax, max_idx[i], width)) {
      myMax = g_idata[i];
      myMaxIndex = max_idx[i];
    }
    if (i + blockSize < n && myMax < g_idata[i + blockSize] &&
        checkDistancef(smaxesRow, smaxesCol, nMax, max_idx[i + blockSize],
                       width)) {
      myMax = g_idata[i + blockSize];
      myMaxIndex = max_idx[i + blockSize];
    }
    i += gridSize;
  }
  sdata[tid] = myMax;
  idxData[tid] = myMaxIndex;
  __syncthreads();

  blockArgMaxFiltered(sdata, idxData, myMax, tid, blockSize, smaxesRow,
                      smaxesCol, nMax, width);

  if (tid == 0) {
    g_odata[blockIdx.x] = sdata[0];
    max_idx[blockIdx.x] = idxData[0];
    if (gridDim.x == 1)
      maxes[nMax] = idxData[0];
  }
}

// Single-precision filtered first pass; see reduce_max_filter_main.
extern "C" __global__ void reduce_max_filter_mainf(float *g_idata,
                                                   float *g_odata,
                                                   int *max_idx,
                                                   unsigned int width,
                                                   unsigned int height,
                                                   int blockSize,
                                                   int *maxes, int nMax) {
  __shared__ int smaxesRow[10];
  __shared__ int smaxesCol[10];
  __shared__ int smaxesVal[10];
  __shared__ float sdata[THREADS_PER_BLOCK];
  __shared__ int idxData[THREADS_PER_BLOCK];

  unsigned int tid = threadIdx.x;
  unsigned int i = blockIdx.x * blockSize + tid;
  unsigned int gridSize = blockSize * gridDim.x;

  if (tid < nMax) {
    smaxesVal[tid] = maxes[tid];
    smaxesRow[tid] = smaxesVal[tid] / width;
    smaxesCol[tid] = smaxesVal[tid] % width;
  }
  __syncthreads();

  float myMax = -INFINITY;
  int myMaxIndex = -1;  // bug fix: was uninitialized
  float val;

  while (i < width * height) {
    val = g_idata[i];
    if (myMax < val &&
        checkDistancef(smaxesRow, smaxesCol, nMax, i, width)) {
      myMax = val;
      myMaxIndex = i;
    }
    if (i + blockSize < width * height) {
      val = g_idata[i + blockSize];
      if (myMax < val &&
          checkDistancef(smaxesRow, smaxesCol, nMax, i + blockSize, width)) {
        myMax = val;
        myMaxIndex = i + blockSize;
      }
    }
    i += gridSize;
  }
  sdata[tid] = myMax;
  idxData[tid] = myMaxIndex;
  __syncthreads();

  blockArgMaxFiltered(sdata, idxData, myMax, tid, blockSize, smaxesRow,
                      smaxesCol, nMax, width);

  if (tid == 0) {
    g_odata[blockIdx.x] = sdata[0];
    max_idx[blockIdx.x] = idxData[0];
    if (gridDim.x == 1)
      maxes[nMax] = idxData[0];
  }
}

// ================================================================
// Local Variables:
// time-stamp-line-limit: 30
// End:
/// ================================================================
///
/// Disclaimer: IMPORTANT: This software was developed at the
/// National Institute of Standards and Technology by employees of the
/// Federal Government in the course of their official duties.
/// Pursuant to title 17 Section 105 of the United States Code this
/// software is not subject to copyright protection and is in the
/// public domain. This is an experimental system. NIST assumes no
/// responsibility whatsoever for its use by other parties, and makes
/// no guarantees, expressed or implied, about its quality,
/// reliability, or any other characteristic. We would appreciate
/// acknowledgement if the software is used. This software can be
/// redistributed and/or modified freely provided that any derivative
/// works bear some notice that they are derived from it, and any
/// modified versions bear some notice that they have been modified.
///
/// ================================================================
// ================================================================
//
// Author: Timothy Blattner
// Date: Wed Nov 30 12:36:40 2011 EST
//
// Functions that execute on the graphics card for doing
// Vector computation.
//
// ================================================================

#include <hip/hip_runtime.h>
#include <hipfft/hipfft.h>
#include<float.h>

// All kernels below use a 1-D launch and only the .x grid/block
// components.  NOTE(review): the reduction kernels take `blockSize` as a
// kernel argument; for the shared-memory trees below to be correct it
// must equal blockDim.x, be a power of two, and be <= THREADS_PER_BLOCK
// (512 is also accepted by the first tree stage only if the shared
// arrays were large enough -- here they are THREADS_PER_BLOCK = 256, so
// blockSize must be <= 256).  Confirm against the host launch code.
#define THREADS_PER_BLOCK 256
#define MIN_DISTANCE 1.0

// ================================================================

// Squared Euclidean distance between grid points (x1,y1) and (x2,y2).
// Currently unused: the checkDistance helpers test for exact
// coincidence instead (see their commented-out MIN_DISTANCE logic).
__device__ double distance(int x1, int x2, int y1, int y2)
{
  return ((double(x1-x2))*(double(x1-x2)))+ ((double(y1-y2))*(double(y1-y2)));
}

// Returns true iff flat index curIdx (into a row-major array of row
// width `width`) does NOT coincide with any of the first nMax
// previously-found maxima, given as parallel row/col arrays.
// Callers pass __shared__ arrays of size 10, so nMax <= 10 is assumed.
__device__ bool checkDistance(int *maxesRow, int *maxesCol, int nMax,
                              int curIdx, int width)
{
  int row = curIdx / width;
  int col = curIdx % width;
  int j;
  //double dist;
  for (j = 0; j < nMax; j++) {
    if (maxesRow[j] == row && maxesCol[j] == col)
      return false;
    //dist = distance(maxesRow[j], row, maxesCol[j], col);
    //if (dist < MIN_DISTANCE)
    //  return false;
  }
  return true;
}

// Same as above, but for volatile pointers (used in the warp-synchronous
// tail of the reductions, where the shared arrays are read via volatile).
__device__ bool checkDistance(volatile int *maxesRow, volatile int *maxesCol,
                              int nMax, int curIdx, int width)
{
  int row = curIdx / width;
  int col = curIdx % width;
  int j;
  //double dist;
  for (j = 0; j < nMax; j++) {
    if (maxesRow[j] == row && maxesCol[j] == col)
      return false;
    // dist = distance(maxesRow[j], row, maxesCol[j], col);
    // if (dist < MIN_DISTANCE)
    //   return false;
  }
  return true;
}

// Element-wise normalized cross-power spectrum:
//   fc[i] = (c1[i] * conj(c2[i])) / |c1[i] * conj(c2[i])|
// with a DBL_EPSILON fallback when the magnitude is 0 or NaN.
// Stages operands through shared memory.
// NOTE(review): the early `return` for idx >= size happens before
// __syncthreads(); when size is not a multiple of the block size the
// barrier is reached by only part of the block -- confirm that the host
// always launches with size a multiple of THREADS_PER_BLOCK.
extern "C" __global__ void elt_prod_conj(hipfftDoubleComplex *fc,
                                         hipfftDoubleComplex * c1,
                                         hipfftDoubleComplex * c2, int size)
{
  __shared__ hipfftDoubleComplex sfc[THREADS_PER_BLOCK];
  __shared__ hipfftDoubleComplex sc1[THREADS_PER_BLOCK];
  __shared__ hipfftDoubleComplex sc2[THREADS_PER_BLOCK];
  int idx = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
  if (idx >= size) return;
  sc1[threadIdx.x] = c1[idx];
  sc2[threadIdx.x] = c2[idx];
  __syncthreads();
  sfc[threadIdx.x] = hipCmul(sc1[threadIdx.x], hipConj(sc2[threadIdx.x]));
  double mag = hipCabs(sfc[threadIdx.x]);
  if (mag == 0 || isnan(mag)) {
    // Degenerate product: substitute a tiny real value so the divide
    // below stays finite.
    mag = DBL_EPSILON;
    sfc[threadIdx.x].x = DBL_EPSILON;
  }
  fc[idx] = make_hipDoubleComplex(hipCreal(sfc[threadIdx.x]) / mag,
                                  hipCimag(sfc[threadIdx.x]) / mag);
}

// Variant of elt_prod_conj: reads operands straight from global memory
// and computes the magnitude with an explicit sqrt instead of hipCabs.
// Same early-return-before-barrier caveat as elt_prod_conj.
extern "C" __global__ void elt_prod_conj_v2(hipfftDoubleComplex *fc,
                                            hipfftDoubleComplex * c1,
                                            hipfftDoubleComplex * c2, int size)
{
  __shared__ hipfftDoubleComplex sfc[THREADS_PER_BLOCK];
  int idx = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
  if (idx >= size) return;
  //cufftDoubleComplex fc_res;
  sfc[threadIdx.x] = hipCmul(c1[idx], hipConj(c2[idx]));
  __syncthreads();
  double mag;
  // mag = sqrt(fc_res.x * fc_res.x + fc_res.y * fc_res.y);
  mag = sqrt(sfc[threadIdx.x].x * sfc[threadIdx.x].x +
             sfc[threadIdx.x].y * sfc[threadIdx.x].y);
  if (isnan(mag) || mag == 0) {
    mag = DBL_EPSILON; //cuCabs(sfc[threadIdx.x]);
    sfc[threadIdx.x].x = DBL_EPSILON;
  }
  // if (mag == 0)
  //   mag = DBL_EPSILON;
  fc[idx] = make_hipDoubleComplex(sfc[threadIdx.x].x / mag,
                                  sfc[threadIdx.x].y / mag);
}

// Register-only variant: no shared memory, no barrier, so it is safe for
// any `size`.  Falls back to hipCabs and then DBL_EPSILON for degenerate
// magnitudes.
extern "C" __global__ void elt_prod_conj_v3(hipfftDoubleComplex *fc,
                                            hipfftDoubleComplex * c1,
                                            hipfftDoubleComplex *c2, int size)
{
  int idx = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
  if (idx >= size) return;
  hipfftDoubleComplex _c1 = c1[idx];
  hipfftDoubleComplex _c2 = c2[idx];
  hipfftDoubleComplex _fc = hipCmul(_c1, hipConj(_c2));
  double mag = sqrt(_fc.x * _fc.x + _fc.y * _fc.y);
  if (isnan(mag) || mag == 0)
    mag = hipCabs(_fc);
  if (mag == 0)
    mag = DBL_EPSILON;
  fc[idx] = make_hipDoubleComplex(_fc.x / mag, _fc.y / mag);
}

// Second-pass max reduction: g_idata holds per-block partial maxima from
// a previous pass and max_idx their original indices.  Each block writes
// its max to g_odata[blockIdx.x] and the winning index back into
// max_idx[blockIdx.x].
// NOTE(review): myMax starts at 0.0, so if every element a thread sees
// is <= 0, myMaxIndex is used uninitialized when stored to idxData.
// NOTE(review): the final __syncthreads() sits inside `if (tid < 32)`
// (divergent barrier), and the warp phase relies on volatile +
// warp-lockstep execution rather than __syncwarp -- legacy pattern;
// verify on the target architecture.
extern "C" __global__ void reduce_max_final(double *g_idata, double *g_odata,
                                            int * max_idx, unsigned int n,
                                            int blockSize)
{
  __shared__ double sdata[THREADS_PER_BLOCK];
  __shared__ int idxData[THREADS_PER_BLOCK];
  unsigned int tid = threadIdx.x;
  unsigned int i = blockIdx.x*(blockSize*2) + tid;   // each thread covers 2 elements per grid step
  unsigned int gridSize = blockSize*2*gridDim.x;
  double myMax = 0.0;
  int myMaxIndex;
  // Grid-stride accumulation of the per-thread running max (and the
  // index that produced it, taken from max_idx).
  while (i < n) {
    if (myMax < g_idata[i]) { myMax = g_idata[i]; myMaxIndex = max_idx[i]; }
    if (i+blockSize < n) {
      if (myMax < g_idata[i+blockSize]) { myMax = g_idata[i+blockSize]; myMaxIndex = max_idx[i+blockSize]; }
    }
    i += gridSize;
  }
  sdata[tid] = myMax;
  idxData[tid] = myMaxIndex;
  __syncthreads();
  // Shared-memory tree reduction; value and index move together.
  if (blockSize >= 512) { if (tid < 256) { if (myMax < sdata[tid + 256]) { sdata[tid] = myMax = sdata[tid+256]; idxData[tid] = idxData[tid+256]; } } __syncthreads(); }
  if (blockSize >= 256) { if (tid < 128) { if (myMax < sdata[tid + 128]) { sdata[tid] = myMax = sdata[tid+128]; idxData[tid] = idxData[tid+128]; } } __syncthreads(); }
  if (blockSize >= 128) { if (tid < 64) { if(myMax < sdata[tid + 64]) { sdata[tid] = myMax = sdata[tid+64]; idxData[tid] = idxData[tid+64]; } } __syncthreads(); }
  // Warp-synchronous tail (no barriers between stages; volatile forces
  // the shared-memory traffic).
  volatile double *vdata = sdata;
  volatile int *vidxData = idxData;
  if (tid < 32) {
    if (blockSize >= 64) if (myMax < vdata[tid + 32]) { vdata[tid] = myMax = vdata[tid+32]; vidxData[tid] = vidxData[tid+32]; }
    if (blockSize >= 32) if (myMax < vdata[tid + 16]) { vdata[tid] = myMax = vdata[tid+16]; vidxData[tid] = vidxData[tid+16]; }
    if (blockSize >= 16) if (myMax < vdata[tid + 8]) { vdata[tid] = myMax = vdata[tid+8]; vidxData[tid] = vidxData[tid+8]; }
    if (blockSize >= 8) if (myMax < vdata[tid + 4]) { vdata[tid] = myMax = vdata[tid+4]; vidxData[tid] = vidxData[tid+4]; }
    if (blockSize >= 4) if (myMax < vdata[tid+2]) { vdata[tid] = myMax = vdata[tid+2]; vidxData[tid] = vidxData[tid+2]; }
    if (blockSize >= 2) if (myMax < vdata[tid + 1]) { vdata[tid] = myMax = vdata[tid+1]; vidxData[tid] = vidxData[tid+1]; }
    __syncthreads();
  }
  if (tid == 0) { g_odata[blockIdx.x] = sdata[0]; max_idx[blockIdx.x] = idxData[0]; }
}

// First-pass max reduction over raw data: indices are generated from the
// flat position `i` itself (not read from max_idx).  Per-block results go
// to g_odata / max_idx, to be combined by reduce_max_final.
// Same uninitialized-myMaxIndex and divergent-barrier caveats as
// reduce_max_final.
extern "C" __global__ void reduce_max_main(double *g_idata, double *g_odata,
                                           int * max_idx, unsigned int n,
                                           int blockSize)
{
  __shared__ double sdata[THREADS_PER_BLOCK];
  __shared__ int idxData[THREADS_PER_BLOCK];
  unsigned int tid = threadIdx.x;
  unsigned int i = blockIdx.x*(blockSize) + tid;
  unsigned int gridSize = blockSize*gridDim.x;
  double myMax = 0.0;
  int myMaxIndex;
  double val;
  while (i < n) {
    val = g_idata[i];
    if (myMax < val) { myMax = val; myMaxIndex = i; }
    if (i+blockSize < n) {
      val = g_idata[i+blockSize];
      if (myMax < val) { myMax = val; myMaxIndex = i+blockSize; }
    }
    i += gridSize;
  }
  sdata[tid] = myMax;
  idxData[tid] = myMaxIndex;
  __syncthreads();
  if (blockSize >= 512) { if (tid < 256) { if (myMax < sdata[tid + 256]) { sdata[tid] = myMax = sdata[tid+256]; idxData[tid] = idxData[tid+256]; } } __syncthreads(); }
  if (blockSize >= 256) { if (tid < 128) { if (myMax < sdata[tid + 128]) { sdata[tid] = myMax = sdata[tid+128]; idxData[tid] = idxData[tid+128]; } } __syncthreads(); }
  if (blockSize >= 128) { if (tid < 64) { if(myMax < sdata[tid + 64]) { sdata[tid] = myMax = sdata[tid+64]; idxData[tid] = idxData[tid+64]; } } __syncthreads(); }
  volatile double *vdata = sdata;
  volatile int *vidxData = idxData;
  if (tid < 32) {
    if (blockSize >= 64) if (myMax < vdata[tid + 32]) { vdata[tid] = myMax = vdata[tid+32]; vidxData[tid] = vidxData[tid+32]; }
    if (blockSize >= 32) if (myMax < vdata[tid + 16]) { vdata[tid] = myMax = vdata[tid+16]; vidxData[tid] = vidxData[tid+16]; }
    if (blockSize >= 16) if (myMax < vdata[tid + 8]) { vdata[tid] = myMax = vdata[tid+8]; vidxData[tid] = vidxData[tid+8]; }
    if (blockSize >= 8) if (myMax < vdata[tid + 4]) { vdata[tid] = myMax = vdata[tid+4]; vidxData[tid] = vidxData[tid+4]; }
    if (blockSize >= 4) if (myMax < vdata[tid+2]) { vdata[tid] = myMax = vdata[tid+2]; vidxData[tid] = vidxData[tid+2]; }
    if (blockSize >= 2) if (myMax < vdata[tid + 1]) { vdata[tid] = myMax = vdata[tid+1]; vidxData[tid] = vidxData[tid+1]; }
    __syncthreads();
  }
  if (tid == 0) { g_odata[blockIdx.x] = sdata[0]; max_idx[blockIdx.x] = idxData[0]; }
}

// Filtered second-pass max reduction: like reduce_max_final but a
// candidate only wins if checkDistance says it does not coincide with any
// of the nMax maxima already recorded in maxes[0..nMax).  The previous
// maxima are staged into __shared__ arrays (capacity 10 => nMax <= 10
// assumed; maxes must hold at least nMax+1 entries for the final append).
// When launched with a single block, the overall winner is appended as
// maxes[nMax].  Same caveats as reduce_max_final.
extern "C" __global__ void reduce_max_filter_final(double *g_idata,
                                                   double *g_odata,
                                                   int * max_idx,
                                                   unsigned int n,
                                                   unsigned int width,
                                                   int blockSize,
                                                   int *maxes, int nMax)
{
  __shared__ int smaxesRow[10];
  __shared__ int smaxesCol[10];
  __shared__ int smaxesVal[10];
  __shared__ double sdata[THREADS_PER_BLOCK];
  __shared__ int idxData[THREADS_PER_BLOCK];
  unsigned int tid = threadIdx.x;
  unsigned int i = blockIdx.x*(blockSize*2) + tid;
  unsigned int gridSize = blockSize*2*gridDim.x;
  // Stage the already-found maxima (flat index -> row/col) into shared.
  if (tid < nMax) {
    smaxesVal[tid] = maxes[tid];
    smaxesRow[tid] = smaxesVal[tid] / width;
    smaxesCol[tid] = smaxesVal[tid] % width;
  }
  __syncthreads();
  double myMax = 0.0;
  int myMaxIndex;
  while (i < n) {
    if (myMax < g_idata[i]) {
      if (checkDistance(smaxesRow, smaxesCol, nMax, max_idx[i], width)) { myMax = g_idata[i]; myMaxIndex = max_idx[i]; }
    }
    if (i+blockSize < n) {
      if (myMax < g_idata[i+blockSize]) {
        if (checkDistance(smaxesRow, smaxesCol, nMax, max_idx[i+blockSize], width)) { myMax = g_idata[i+blockSize]; myMaxIndex = max_idx[i+blockSize]; }
      }
    }
    i += gridSize;
  }
  sdata[tid] = myMax;
  idxData[tid] = myMaxIndex;
  __syncthreads();
  if (blockSize >= 512) { if (tid < 256) { if (myMax < sdata[tid + 256]) { if (checkDistance(smaxesRow, smaxesCol, nMax, idxData[tid+256], width)) { sdata[tid] = myMax = sdata[tid+256]; idxData[tid] = idxData[tid+256]; } } } __syncthreads(); }
  if (blockSize >= 256) { if (tid < 128) { if (myMax < sdata[tid + 128]) { if (checkDistance(smaxesRow, smaxesCol, nMax, idxData[tid+128], width)) { sdata[tid] = myMax = sdata[tid+128]; idxData[tid] = idxData[tid+128]; } } } __syncthreads(); }
  if (blockSize >= 128) { if (tid < 64) { if(myMax < sdata[tid + 64]) { if (checkDistance(smaxesRow, smaxesCol, nMax, idxData[tid+64], width)) { sdata[tid] = myMax = sdata[tid+64]; idxData[tid] = idxData[tid+64]; } } } __syncthreads(); }
  volatile double *vdata = sdata;
  volatile int *vidxData = idxData;
  volatile int *vsmaxesRow = smaxesRow;
  volatile int *vsmaxesCol = smaxesCol;
  if (tid < 32) {
    if (blockSize >= 64) if (myMax < vdata[tid + 32]) { if (checkDistance(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+32], width)) { vdata[tid] = myMax = vdata[tid+32]; vidxData[tid] = vidxData[tid+32]; } }
    if (blockSize >= 32) if (myMax < vdata[tid + 16]) { if (checkDistance(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+16], width)) { vdata[tid] = myMax = vdata[tid+16]; vidxData[tid] = vidxData[tid+16]; } }
    if (blockSize >= 16) if (myMax < vdata[tid + 8]) { if (checkDistance(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+8], width)) { vdata[tid] = myMax = vdata[tid+8]; vidxData[tid] = vidxData[tid+8]; } }
    if (blockSize >= 8) if (myMax < vdata[tid + 4]) { if (checkDistance(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+4], width)) { vdata[tid] = myMax = vdata[tid+4]; vidxData[tid] = vidxData[tid+4]; } }
    if (blockSize >= 4) if (myMax < vdata[tid+2]) { if (checkDistance(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+2], width)) { vdata[tid] = myMax = vdata[tid+2]; vidxData[tid] = vidxData[tid+2]; } }
    if (blockSize >= 2) if (myMax < vdata[tid + 1]) { if (checkDistance(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+1], width)) { vdata[tid] = myMax = vdata[tid+1]; vidxData[tid] = vidxData[tid+1]; } }
    __syncthreads();
  }
  if (tid == 0) {
    g_odata[blockIdx.x] = sdata[0];
    max_idx[blockIdx.x] = idxData[0];
    // Single-block launch: the block winner is the global winner.
    if (gridDim.x == 1)
      maxes[nMax] = idxData[0];
  }
}

// Filtered first-pass max reduction over a width x height array:
// candidate indices are the flat positions themselves.  Unlike the other
// kernels, myMax starts at -INFINITY, so any finite value wins and
// myMaxIndex is set whenever a thread sees at least one element (still
// uninitialized for threads that see none).  Same filtering, shared
// staging, appended-winner and divergent-barrier notes as
// reduce_max_filter_final.
extern "C" __global__ void reduce_max_filter_main(double *g_idata,
                                                  double *g_odata,
                                                  int * max_idx,
                                                  unsigned int width,
                                                  unsigned int height,
                                                  int blockSize,
                                                  int *maxes, int nMax)
{
  __shared__ int smaxesRow[10];
  __shared__ int smaxesCol[10];
  __shared__ int smaxesVal[10];
  __shared__ double sdata[THREADS_PER_BLOCK];
  __shared__ int idxData[THREADS_PER_BLOCK];
  unsigned int tid = threadIdx.x;
  unsigned int i = blockIdx.x*(blockSize) + tid;
  unsigned int gridSize = blockSize*gridDim.x;
  if (tid < nMax) {
    smaxesVal[tid] = maxes[tid];
    smaxesRow[tid] = smaxesVal[tid] / width;
    smaxesCol[tid] = smaxesVal[tid] % width;
  }
  __syncthreads();
  double myMax = -INFINITY;
  int myMaxIndex;
  double val;
  while (i < width * height) {
    val = g_idata[i];
    if (myMax < val) {
      // compute distance . . .
      if (checkDistance(smaxesRow, smaxesCol, nMax, i, width)) { myMax = val; myMaxIndex = i; }
    }
    if (i+blockSize < width * height) {
      val = g_idata[i+blockSize];
      if (myMax < val) {
        if (checkDistance(smaxesRow, smaxesCol, nMax, i+blockSize, width)) { myMax = val; myMaxIndex = i+blockSize; }
      }
    }
    i += gridSize;
  }
  sdata[tid] = myMax;
  idxData[tid] = myMaxIndex;
  __syncthreads();
  if (blockSize >= 512) { if (tid < 256) { if (myMax < sdata[tid + 256]) { if (checkDistance(smaxesRow, smaxesCol, nMax, idxData[tid+256], width)) { sdata[tid] = myMax = sdata[tid+256]; idxData[tid] = idxData[tid+256]; } } } __syncthreads(); }
  if (blockSize >= 256) { if (tid < 128) { if (myMax < sdata[tid + 128]) { if (checkDistance(smaxesRow, smaxesCol, nMax, idxData[tid+128], width)) { sdata[tid] = myMax = sdata[tid+128]; idxData[tid] = idxData[tid+128]; } } } __syncthreads(); }
  if (blockSize >= 128) { if (tid < 64) { if(myMax < sdata[tid + 64]) { if (checkDistance(smaxesRow, smaxesCol, nMax, idxData[tid+64], width)) { sdata[tid] = myMax = sdata[tid+64]; idxData[tid] = idxData[tid+64]; } } } __syncthreads(); }
  volatile double *vdata = sdata;
  volatile int *vidxData = idxData;
  volatile int *vsmaxesRow = smaxesRow;
  volatile int *vsmaxesCol = smaxesCol;
  if (tid < 32) {
    if (blockSize >= 64) if (myMax < vdata[tid + 32]) { if (checkDistance(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+32], width)) { vdata[tid] = myMax = vdata[tid+32]; vidxData[tid] = vidxData[tid+32]; } }
    if (blockSize >= 32) if (myMax < vdata[tid + 16]) { if (checkDistance(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+16], width)) { vdata[tid] = myMax = vdata[tid+16]; vidxData[tid] = vidxData[tid+16]; } }
    if (blockSize >= 16) if (myMax < vdata[tid + 8]) { if (checkDistance(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+8], width)) { vdata[tid] = myMax = vdata[tid+8]; vidxData[tid] = vidxData[tid+8]; } }
    if (blockSize >= 8) if (myMax < vdata[tid + 4]) { if (checkDistance(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+4], width)) { vdata[tid] = myMax = vdata[tid+4]; vidxData[tid] = vidxData[tid+4]; } }
    if (blockSize >= 4) if (myMax < vdata[tid+2]) { if (checkDistance(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+2], width)) { vdata[tid] = myMax = vdata[tid+2]; vidxData[tid] = vidxData[tid+2]; } }
    if (blockSize >= 2) if (myMax < vdata[tid + 1]) { if (checkDistance(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+1], width)) { vdata[tid] = myMax = vdata[tid+1]; vidxData[tid] = vidxData[tid+1]; } }
    __syncthreads();
  }
  if (tid == 0) {
    g_odata[blockIdx.x] = sdata[0];
    max_idx[blockIdx.x] = idxData[0];
    if (gridDim.x == 1)
      maxes[nMax] = idxData[0];
  }
}

// ================================================================
// ================================================================
// ================================================================
// ================================================================
// ======================= Float versions =========================
// ================================================================
// ================================================================
// ================================================================
// ================================================================
// ================================================================

// Single-precision counterparts of everything above.  Each function
// mirrors its double-precision twin exactly (same structure, same
// caveats); only the scalar type, the hip*f complex helpers, and the
// epsilon constant differ.

// Squared Euclidean distance (float).  Currently unused, like distance().
__device__ float distancef(int x1, int x2, int y1, int y2)
{
  return ((float(x1-x2))*(float(x1-x2)))+ ((float(y1-y2))*(float(y1-y2)));
}

// Float-path coincidence filter; identical logic to checkDistance.
__device__ bool checkDistancef(int *maxesRow, int *maxesCol, int nMax,
                               int curIdx, int width)
{
  int row = curIdx / width;
  int col = curIdx % width;
  int j;
  for (j = 0; j < nMax; j++) {
    if (maxesRow[j] == row && maxesCol[j] == col)
      return false;
    //dist = distance(maxesRow[j], row, maxesCol[j], col);
    //if (dist < MIN_DISTANCE)
    //  return false;
  }
  return true;
}

// Volatile overload for the warp-synchronous reduction tails.
__device__ bool checkDistancef(volatile int *maxesRow, volatile int *maxesCol,
                               int nMax, int curIdx, int width)
{
  int row = curIdx / width;
  int col = curIdx % width;
  int j;
  for (j = 0; j < nMax; j++) {
    if (maxesRow[j] == row && maxesCol[j] == col)
      return false;
  }
  return true;
}

// Float version of elt_prod_conj (shared-memory staging variant).
// Same early-return-before-__syncthreads caveat.
extern "C" __global__ void elt_prod_conjf(hipfftComplex *fc,
                                          hipfftComplex * c1,
                                          hipfftComplex * c2, int size)
{
  __shared__ hipfftComplex sfc[THREADS_PER_BLOCK];
  __shared__ hipfftComplex sc1[THREADS_PER_BLOCK];
  __shared__ hipfftComplex sc2[THREADS_PER_BLOCK];
  int idx = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
  if (idx >= size) return;
  sc1[threadIdx.x] = c1[idx];
  sc2[threadIdx.x] = c2[idx];
  __syncthreads();
  sfc[threadIdx.x] = hipCmulf(sc1[threadIdx.x], hipConjf(sc2[threadIdx.x]));
  float mag = hipCabsf(sfc[threadIdx.x]);
  if (mag == 0 || isnan(mag)) {
    mag = FLT_EPSILON;
    sfc[threadIdx.x].x = FLT_EPSILON;
  }
  fc[idx] = make_hipComplex(hipCrealf(sfc[threadIdx.x]) / mag,
                            hipCimagf(sfc[threadIdx.x]) / mag);
}

// Float version of elt_prod_conj_v2 (explicit sqrtf magnitude).
extern "C" __global__ void elt_prod_conj_v2f(hipfftComplex *fc,
                                             hipfftComplex * c1,
                                             hipfftComplex * c2, int size)
{
  __shared__ hipfftComplex sfc[THREADS_PER_BLOCK];
  int idx = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
  if (idx >= size) return;
  //cufftDoubleComplex fc_res;
  sfc[threadIdx.x] = hipCmulf(c1[idx], hipConjf(c2[idx]));
  __syncthreads();
  float mag;
  // mag = sqrt(fc_res.x * fc_res.x + fc_res.y * fc_res.y);
  mag = sqrtf(sfc[threadIdx.x].x * sfc[threadIdx.x].x +
              sfc[threadIdx.x].y * sfc[threadIdx.x].y);
  if (isnan(mag) || mag == 0) {
    mag = FLT_EPSILON; //cuCabs(sfc[threadIdx.x]);
    sfc[threadIdx.x].x = FLT_EPSILON;
  }
  // if (mag == 0)
  //   mag = DBL_EPSILON;
  fc[idx] = make_hipComplex(sfc[threadIdx.x].x / mag,
                            sfc[threadIdx.x].y / mag);
}

// Float version of elt_prod_conj_v3 (register-only; safe for any size).
extern "C" __global__ void elt_prod_conj_v3f(hipfftComplex *fc,
                                             hipfftComplex * c1,
                                             hipfftComplex *c2, int size)
{
  int idx = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
  if (idx >= size) return;
  hipfftComplex _c1 = c1[idx];
  hipfftComplex _c2 = c2[idx];
  hipfftComplex _fc = hipCmulf(_c1, hipConjf(_c2));
  float mag = sqrtf(_fc.x * _fc.x + _fc.y * _fc.y);
  if (isnan(mag) || mag == 0)
    mag = hipCabsf(_fc);
  if (mag == 0)
    mag = FLT_EPSILON;
  fc[idx] = make_hipComplex(_fc.x / mag, _fc.y / mag);
}

// Float version of reduce_max_final; see that kernel for the
// uninitialized-myMaxIndex and divergent-barrier notes.
extern "C" __global__ void reduce_max_finalf(float *g_idata, float *g_odata,
                                             int * max_idx, unsigned int n,
                                             int blockSize)
{
  __shared__ float sdata[THREADS_PER_BLOCK];
  __shared__ int idxData[THREADS_PER_BLOCK];
  unsigned int tid = threadIdx.x;
  unsigned int i = blockIdx.x*(blockSize*2) + tid;
  unsigned int gridSize = blockSize*2*gridDim.x;
  float myMax = 0.0;
  int myMaxIndex;
  while (i < n) {
    if (myMax < g_idata[i]) { myMax = g_idata[i]; myMaxIndex = max_idx[i]; }
    if (i+blockSize < n) {
      if (myMax < g_idata[i+blockSize]) { myMax = g_idata[i+blockSize]; myMaxIndex = max_idx[i+blockSize]; }
    }
    i += gridSize;
  }
  sdata[tid] = myMax;
  idxData[tid] = myMaxIndex;
  __syncthreads();
  if (blockSize >= 512) { if (tid < 256) { if (myMax < sdata[tid + 256]) { sdata[tid] = myMax = sdata[tid+256]; idxData[tid] = idxData[tid+256]; } } __syncthreads(); }
  if (blockSize >= 256) { if (tid < 128) { if (myMax < sdata[tid + 128]) { sdata[tid] = myMax = sdata[tid+128]; idxData[tid] = idxData[tid+128]; } } __syncthreads(); }
  if (blockSize >= 128) { if (tid < 64) { if(myMax < sdata[tid + 64]) { sdata[tid] = myMax = sdata[tid+64]; idxData[tid] = idxData[tid+64]; } } __syncthreads(); }
  volatile float *vdata = sdata;
  volatile int *vidxData = idxData;
  if (tid < 32) {
    if (blockSize >= 64) if (myMax < vdata[tid + 32]) { vdata[tid] = myMax = vdata[tid+32]; vidxData[tid] = vidxData[tid+32]; }
    if (blockSize >= 32) if (myMax < vdata[tid + 16]) { vdata[tid] = myMax = vdata[tid+16]; vidxData[tid] = vidxData[tid+16]; }
    if (blockSize >= 16) if (myMax < vdata[tid + 8]) { vdata[tid] = myMax = vdata[tid+8]; vidxData[tid] = vidxData[tid+8]; }
    if (blockSize >= 8) if (myMax < vdata[tid + 4]) { vdata[tid] = myMax = vdata[tid+4]; vidxData[tid] = vidxData[tid+4]; }
    if (blockSize >= 4) if (myMax < vdata[tid+2]) { vdata[tid] = myMax = vdata[tid+2]; vidxData[tid] = vidxData[tid+2]; }
    if (blockSize >= 2) if (myMax < vdata[tid + 1]) { vdata[tid] = myMax = vdata[tid+1]; vidxData[tid] = vidxData[tid+1]; }
    __syncthreads();
  }
  if (tid == 0) { g_odata[blockIdx.x] = sdata[0]; max_idx[blockIdx.x] = idxData[0]; }
}

// Float version of reduce_max_main (first pass; indices are generated
// from the flat position).
extern "C" __global__ void reduce_max_mainf(float *g_idata, float *g_odata,
                                            int * max_idx, unsigned int n,
                                            int blockSize)
{
  __shared__ float sdata[THREADS_PER_BLOCK];
  __shared__ int idxData[THREADS_PER_BLOCK];
  unsigned int tid = threadIdx.x;
  unsigned int i = blockIdx.x*(blockSize) + tid;
  unsigned int gridSize = blockSize*gridDim.x;
  float myMax = 0.0;
  int myMaxIndex;
  float val;
  while (i < n) {
    val = g_idata[i];
    if (myMax < val) { myMax = val; myMaxIndex = i; }
    if (i+blockSize < n) {
      val = g_idata[i+blockSize];
      if (myMax < val) { myMax = val; myMaxIndex = i+blockSize; }
    }
    i += gridSize;
  }
  sdata[tid] = myMax;
  idxData[tid] = myMaxIndex;
  __syncthreads();
  if (blockSize >= 512) { if (tid < 256) { if (myMax < sdata[tid + 256]) { sdata[tid] = myMax = sdata[tid+256]; idxData[tid] = idxData[tid+256]; } } __syncthreads(); }
  if (blockSize >= 256) { if (tid < 128) { if (myMax < sdata[tid + 128]) { sdata[tid] = myMax = sdata[tid+128]; idxData[tid] = idxData[tid+128]; } } __syncthreads(); }
  if (blockSize >= 128) { if (tid < 64) { if(myMax < sdata[tid + 64]) { sdata[tid] = myMax = sdata[tid+64]; idxData[tid] = idxData[tid+64]; } } __syncthreads(); }
  volatile float *vdata = sdata;
  volatile int *vidxData = idxData;
  if (tid < 32) {
    if (blockSize >= 64) if (myMax < vdata[tid + 32]) { vdata[tid] = myMax = vdata[tid+32]; vidxData[tid] = vidxData[tid+32]; }
    if (blockSize >= 32) if (myMax < vdata[tid + 16]) { vdata[tid] = myMax = vdata[tid+16]; vidxData[tid] = vidxData[tid+16]; }
    if (blockSize >= 16) if (myMax < vdata[tid + 8]) { vdata[tid] = myMax = vdata[tid+8]; vidxData[tid] = vidxData[tid+8]; }
    if (blockSize >= 8) if (myMax < vdata[tid + 4]) { vdata[tid] = myMax = vdata[tid+4]; vidxData[tid] = vidxData[tid+4]; }
    if (blockSize >= 4) if (myMax < vdata[tid+2]) { vdata[tid] = myMax = vdata[tid+2]; vidxData[tid] = vidxData[tid+2]; }
    if (blockSize >= 2) if (myMax < vdata[tid + 1]) { vdata[tid] = myMax = vdata[tid+1]; vidxData[tid] = vidxData[tid+1]; }
    __syncthreads();
  }
  if (tid == 0) { g_odata[blockIdx.x] = sdata[0]; max_idx[blockIdx.x] = idxData[0]; }
}

// Float version of reduce_max_filter_final (second pass with coincidence
// filtering against maxes[0..nMax); appends the winner when gridDim.x==1).
extern "C" __global__ void reduce_max_filter_finalf(float *g_idata,
                                                    float *g_odata,
                                                    int * max_idx,
                                                    unsigned int n,
                                                    unsigned int width,
                                                    int blockSize,
                                                    int *maxes, int nMax)
{
  __shared__ int smaxesRow[10];
  __shared__ int smaxesCol[10];
  __shared__ int smaxesVal[10];
  __shared__ float sdata[THREADS_PER_BLOCK];
  __shared__ int idxData[THREADS_PER_BLOCK];
  unsigned int tid = threadIdx.x;
  unsigned int i = blockIdx.x*(blockSize*2) + tid;
  unsigned int gridSize = blockSize*2*gridDim.x;
  if (tid < nMax) {
    smaxesVal[tid] = maxes[tid];
    smaxesRow[tid] = smaxesVal[tid] / width;
    smaxesCol[tid] = smaxesVal[tid] % width;
  }
  __syncthreads();
  float myMax = 0.0;
  int myMaxIndex;
  while (i < n) {
    if (myMax < g_idata[i]) {
      if (checkDistancef(smaxesRow, smaxesCol, nMax, max_idx[i], width)) { myMax = g_idata[i]; myMaxIndex = max_idx[i]; }
    }
    if (i+blockSize < n) {
      if (myMax < g_idata[i+blockSize]) {
        if (checkDistancef(smaxesRow, smaxesCol, nMax, max_idx[i+blockSize], width)) { myMax = g_idata[i+blockSize]; myMaxIndex = max_idx[i+blockSize]; }
      }
    }
    i += gridSize;
  }
  sdata[tid] = myMax;
  idxData[tid] = myMaxIndex;
  __syncthreads();
  if (blockSize >= 512) { if (tid < 256) { if (myMax < sdata[tid + 256]) { if (checkDistancef(smaxesRow, smaxesCol, nMax, idxData[tid+256], width)) { sdata[tid] = myMax = sdata[tid+256]; idxData[tid] = idxData[tid+256]; } } } __syncthreads(); }
  if (blockSize >= 256) { if (tid < 128) { if (myMax < sdata[tid + 128]) { if (checkDistancef(smaxesRow, smaxesCol, nMax, idxData[tid+128], width)) { sdata[tid] = myMax = sdata[tid+128]; idxData[tid] = idxData[tid+128]; } } } __syncthreads(); }
  if (blockSize >= 128) { if (tid < 64) { if(myMax < sdata[tid + 64]) { if (checkDistancef(smaxesRow, smaxesCol, nMax, idxData[tid+64], width)) { sdata[tid] = myMax = sdata[tid+64]; idxData[tid] = idxData[tid+64]; } } } __syncthreads(); }
  volatile float *vdata = sdata;
  volatile int *vidxData = idxData;
  volatile int *vsmaxesRow = smaxesRow;
  volatile int *vsmaxesCol = smaxesCol;
  if (tid < 32) {
    if (blockSize >= 64) if (myMax < vdata[tid + 32]) { if (checkDistancef(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+32], width)) { vdata[tid] = myMax = vdata[tid+32]; vidxData[tid] = vidxData[tid+32]; } }
    if (blockSize >= 32) if (myMax < vdata[tid + 16]) { if (checkDistancef(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+16], width)) { vdata[tid] = myMax = vdata[tid+16]; vidxData[tid] = vidxData[tid+16]; } }
    if (blockSize >= 16) if (myMax < vdata[tid + 8]) { if (checkDistancef(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+8], width)) { vdata[tid] = myMax = vdata[tid+8]; vidxData[tid] = vidxData[tid+8]; } }
    if (blockSize >= 8) if (myMax < vdata[tid + 4]) { if (checkDistancef(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+4], width)) { vdata[tid] = myMax = vdata[tid+4]; vidxData[tid] = vidxData[tid+4]; } }
    if (blockSize >= 4) if (myMax < vdata[tid+2]) { if (checkDistancef(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+2], width)) { vdata[tid] = myMax = vdata[tid+2]; vidxData[tid] = vidxData[tid+2]; } }
    if (blockSize >= 2) if (myMax < vdata[tid + 1]) { if (checkDistancef(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+1], width)) { vdata[tid] = myMax = vdata[tid+1]; vidxData[tid] = vidxData[tid+1]; } }
    __syncthreads();
  }
  if (tid == 0) {
    g_odata[blockIdx.x] = sdata[0];
    max_idx[blockIdx.x] = idxData[0];
    if (gridDim.x == 1)
      maxes[nMax] = idxData[0];
  }
}

// Float version of reduce_max_filter_main (first pass with filtering;
// myMax starts at -INFINITY so any finite value wins).
extern "C" __global__ void reduce_max_filter_mainf(float *g_idata,
                                                   float *g_odata,
                                                   int * max_idx,
                                                   unsigned int width,
                                                   unsigned int height,
                                                   int blockSize,
                                                   int *maxes, int nMax)
{
  __shared__ int smaxesRow[10];
  __shared__ int smaxesCol[10];
  __shared__ int smaxesVal[10];
  __shared__ float sdata[THREADS_PER_BLOCK];
  __shared__ int idxData[THREADS_PER_BLOCK];
  unsigned int tid = threadIdx.x;
  unsigned int i = blockIdx.x*(blockSize) + tid;
  unsigned int gridSize = blockSize*gridDim.x;
  if (tid < nMax) {
    smaxesVal[tid] = maxes[tid];
    smaxesRow[tid] = smaxesVal[tid] / width;
    smaxesCol[tid] = smaxesVal[tid] % width;
  }
  __syncthreads();
  float myMax = -INFINITY;
  int myMaxIndex;
  float val;
  while (i < width * height) {
    val = g_idata[i];
    if (myMax < val) {
      // compute distance . . .
      if (checkDistancef(smaxesRow, smaxesCol, nMax, i, width)) { myMax = val; myMaxIndex = i; }
    }
    if (i+blockSize < width * height) {
      val = g_idata[i+blockSize];
      if (myMax < val) {
        if (checkDistancef(smaxesRow, smaxesCol, nMax, i+blockSize, width)) { myMax = val; myMaxIndex = i+blockSize; }
      }
    }
    i += gridSize;
  }
  sdata[tid] = myMax;
  idxData[tid] = myMaxIndex;
  __syncthreads();
  if (blockSize >= 512) { if (tid < 256) { if (myMax < sdata[tid + 256]) { if (checkDistancef(smaxesRow, smaxesCol, nMax, idxData[tid+256], width)) { sdata[tid] = myMax = sdata[tid+256]; idxData[tid] = idxData[tid+256]; } } } __syncthreads(); }
  if (blockSize >= 256) { if (tid < 128) { if (myMax < sdata[tid + 128]) { if (checkDistancef(smaxesRow, smaxesCol, nMax, idxData[tid+128], width)) { sdata[tid] = myMax = sdata[tid+128]; idxData[tid] = idxData[tid+128]; } } } __syncthreads(); }
  if (blockSize >= 128) { if (tid < 64) { if(myMax < sdata[tid + 64]) { if (checkDistancef(smaxesRow, smaxesCol, nMax, idxData[tid+64], width)) { sdata[tid] = myMax = sdata[tid+64]; idxData[tid] = idxData[tid+64]; } } } __syncthreads(); }
  volatile float *vdata = sdata;
  volatile int *vidxData = idxData;
  volatile int *vsmaxesRow = smaxesRow;
  volatile int *vsmaxesCol = smaxesCol;
  if (tid < 32) {
    if (blockSize >= 64) if (myMax < vdata[tid + 32]) { if (checkDistancef(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+32], width)) { vdata[tid] = myMax = vdata[tid+32]; vidxData[tid] = vidxData[tid+32]; } }
    if (blockSize >= 32) if (myMax < vdata[tid + 16]) { if (checkDistancef(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+16], width)) { vdata[tid] = myMax = vdata[tid+16]; vidxData[tid] = vidxData[tid+16]; } }
    if (blockSize >= 16) if (myMax < vdata[tid + 8]) { if (checkDistancef(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+8], width)) { vdata[tid] = myMax = vdata[tid+8]; vidxData[tid] = vidxData[tid+8]; } }
    if (blockSize >= 8) if (myMax < vdata[tid + 4]) { if (checkDistancef(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+4], width)) { vdata[tid] = myMax = vdata[tid+4]; vidxData[tid] = vidxData[tid+4]; } }
    if (blockSize >= 4) if (myMax < vdata[tid+2]) { if (checkDistancef(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+2], width)) { vdata[tid] = myMax = vdata[tid+2]; vidxData[tid] = vidxData[tid+2]; } }
    if (blockSize >= 2) if (myMax < vdata[tid + 1]) { if (checkDistancef(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+1], width)) { vdata[tid] = myMax = vdata[tid+1]; vidxData[tid] = vidxData[tid+1]; } }
    __syncthreads();
  }
  if (tid == 0) {
    g_odata[blockIdx.x] = sdata[0];
    max_idx[blockIdx.x] = idxData[0];
    if (gridDim.x == 1)
      maxes[nMax] = idxData[0];
  }
}

// ================================================================
// Local Variables:
// time-stamp-line-limit: 30
// End:
// NOTE(review): stray non-code text (file-concatenation artifact) commented out so the file compiles:
// You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
/// ================================================================
///
/// Disclaimer: IMPORTANT: This software was developed at the
/// National Institute of Standards and Technology by employees of the
/// Federal Government in the course of their official duties.
/// Pursuant to title 17 Section 105 of the United States Code this
/// software is not subject to copyright protection and is in the
/// public domain. This is an experimental system. NIST assumes no
/// responsibility whatsoever for its use by other parties, and makes
/// no guarantees, expressed or implied, about its quality,
/// reliability, or any other characteristic. We would appreciate
/// acknowledgement if the software is used. This software can be
/// redistributed and/or modified freely provided that any derivative
/// works bear some notice that they are derived from it, and any
/// modified versions bear some notice that they have been modified.
///
/// ================================================================

// ================================================================
//
// Author: Timothy Blattner
// Date: Wed Nov 30 12:36:40 2011 EST
//
// Functions that execute on the graphics card for doing
// Vector computation.
// // ================================================================ #include <hip/hip_runtime.h> #include <hipfft/hipfft.h> #include<float.h> #define THREADS_PER_BLOCK 256 #define MIN_DISTANCE 1.0 // ================================================================ __device__ double distance(int x1, int x2, int y1, int y2) { return ((double(x1-x2))*(double(x1-x2)))+ ((double(y1-y2))*(double(y1-y2))); } __device__ bool checkDistance(int *maxesRow, int *maxesCol, int nMax, int curIdx, int width) { int row = curIdx / width; int col = curIdx % width; int j; //double dist; for (j = 0; j < nMax; j++) { if (maxesRow[j] == row && maxesCol[j] == col) return false; //dist = distance(maxesRow[j], row, maxesCol[j], col); //if (dist < MIN_DISTANCE) // return false; } return true; } __device__ bool checkDistance(volatile int *maxesRow, volatile int *maxesCol, int nMax, int curIdx, int width) { int row = curIdx / width; int col = curIdx % width; int j; //double dist; for (j = 0; j < nMax; j++) { if (maxesRow[j] == row && maxesCol[j] == col) return false; // dist = distance(maxesRow[j], row, maxesCol[j], col); // if (dist < MIN_DISTANCE) // return false; } return true; } extern "C" __global__ void elt_prod_conj(hipfftDoubleComplex *fc, hipfftDoubleComplex * c1, hipfftDoubleComplex * c2, int size) { __shared__ hipfftDoubleComplex sfc[THREADS_PER_BLOCK]; __shared__ hipfftDoubleComplex sc1[THREADS_PER_BLOCK]; __shared__ hipfftDoubleComplex sc2[THREADS_PER_BLOCK]; int idx = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK; if (idx >= size) return; sc1[threadIdx.x] = c1[idx]; sc2[threadIdx.x] = c2[idx]; __syncthreads(); sfc[threadIdx.x] = hipCmul(sc1[threadIdx.x], hipConj(sc2[threadIdx.x])); double mag = hipCabs(sfc[threadIdx.x]); if (mag == 0 || isnan(mag)) { mag = DBL_EPSILON; sfc[threadIdx.x].x = DBL_EPSILON; } fc[idx] = make_hipDoubleComplex(hipCreal(sfc[threadIdx.x]) / mag, hipCimag(sfc[threadIdx.x]) / mag); } extern "C" __global__ void elt_prod_conj_v2(hipfftDoubleComplex *fc, 
hipfftDoubleComplex * c1, hipfftDoubleComplex * c2, int size) { __shared__ hipfftDoubleComplex sfc[THREADS_PER_BLOCK]; int idx = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK; if (idx >= size) return; //cufftDoubleComplex fc_res; sfc[threadIdx.x] = hipCmul(c1[idx], hipConj(c2[idx])); __syncthreads(); double mag; // mag = sqrt(fc_res.x * fc_res.x + fc_res.y * fc_res.y); mag = sqrt(sfc[threadIdx.x].x * sfc[threadIdx.x].x + sfc[threadIdx.x].y * sfc[threadIdx.x].y); if (isnan(mag) || mag == 0) { mag = DBL_EPSILON; //cuCabs(sfc[threadIdx.x]); sfc[threadIdx.x].x = DBL_EPSILON; } // if (mag == 0) // mag = DBL_EPSILON; fc[idx] = make_hipDoubleComplex(sfc[threadIdx.x].x / mag, sfc[threadIdx.x].y / mag); } extern "C" __global__ void elt_prod_conj_v3(hipfftDoubleComplex *fc, hipfftDoubleComplex * c1, hipfftDoubleComplex *c2, int size) { int idx = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK; if (idx >= size) return; hipfftDoubleComplex _c1 = c1[idx]; hipfftDoubleComplex _c2 = c2[idx]; hipfftDoubleComplex _fc = hipCmul(_c1, hipConj(_c2)); double mag = sqrt(_fc.x * _fc.x + _fc.y * _fc.y); if (isnan(mag) || mag == 0) mag = hipCabs(_fc); if (mag == 0) mag = DBL_EPSILON; fc[idx] = make_hipDoubleComplex(_fc.x / mag, _fc.y / mag); } extern "C" __global__ void reduce_max_final(double *g_idata, double *g_odata, int * max_idx, unsigned int n, int blockSize) { __shared__ double sdata[THREADS_PER_BLOCK]; __shared__ int idxData[THREADS_PER_BLOCK]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockSize*2) + tid; unsigned int gridSize = blockSize*2*gridDim.x; double myMax = 0.0; int myMaxIndex; while (i < n) { if (myMax < g_idata[i]) { myMax = g_idata[i]; myMaxIndex = max_idx[i]; } if (i+blockSize < n) { if (myMax < g_idata[i+blockSize]) { myMax = g_idata[i+blockSize]; myMaxIndex = max_idx[i+blockSize]; } } i += gridSize; } sdata[tid] = myMax; idxData[tid] = myMaxIndex; __syncthreads(); if (blockSize >= 512) { if (tid < 256) { if (myMax < sdata[tid + 256]) { sdata[tid] = 
myMax = sdata[tid+256]; idxData[tid] = idxData[tid+256]; } } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { if (myMax < sdata[tid + 128]) { sdata[tid] = myMax = sdata[tid+128]; idxData[tid] = idxData[tid+128]; } } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { if(myMax < sdata[tid + 64]) { sdata[tid] = myMax = sdata[tid+64]; idxData[tid] = idxData[tid+64]; } } __syncthreads(); } volatile double *vdata = sdata; volatile int *vidxData = idxData; if (tid < 32) { if (blockSize >= 64) if (myMax < vdata[tid + 32]) { vdata[tid] = myMax = vdata[tid+32]; vidxData[tid] = vidxData[tid+32]; } if (blockSize >= 32) if (myMax < vdata[tid + 16]) { vdata[tid] = myMax = vdata[tid+16]; vidxData[tid] = vidxData[tid+16]; } if (blockSize >= 16) if (myMax < vdata[tid + 8]) { vdata[tid] = myMax = vdata[tid+8]; vidxData[tid] = vidxData[tid+8]; } if (blockSize >= 8) if (myMax < vdata[tid + 4]) { vdata[tid] = myMax = vdata[tid+4]; vidxData[tid] = vidxData[tid+4]; } if (blockSize >= 4) if (myMax < vdata[tid+2]) { vdata[tid] = myMax = vdata[tid+2]; vidxData[tid] = vidxData[tid+2]; } if (blockSize >= 2) if (myMax < vdata[tid + 1]) { vdata[tid] = myMax = vdata[tid+1]; vidxData[tid] = vidxData[tid+1]; } __syncthreads(); } if (tid == 0) { g_odata[blockIdx.x] = sdata[0]; max_idx[blockIdx.x] = idxData[0]; } } extern "C" __global__ void reduce_max_main(double *g_idata, double *g_odata, int * max_idx, unsigned int n, int blockSize) { __shared__ double sdata[THREADS_PER_BLOCK]; __shared__ int idxData[THREADS_PER_BLOCK]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockSize) + tid; unsigned int gridSize = blockSize*gridDim.x; double myMax = 0.0; int myMaxIndex; double val; while (i < n) { val = g_idata[i]; if (myMax < val) { myMax = val; myMaxIndex = i; } if (i+blockSize < n) { val = g_idata[i+blockSize]; if (myMax < val) { myMax = val; myMaxIndex = i+blockSize; } } i += gridSize; } sdata[tid] = myMax; idxData[tid] = myMaxIndex; __syncthreads(); if (blockSize >= 
512) { if (tid < 256) { if (myMax < sdata[tid + 256]) { sdata[tid] = myMax = sdata[tid+256]; idxData[tid] = idxData[tid+256]; } } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { if (myMax < sdata[tid + 128]) { sdata[tid] = myMax = sdata[tid+128]; idxData[tid] = idxData[tid+128]; } } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { if(myMax < sdata[tid + 64]) { sdata[tid] = myMax = sdata[tid+64]; idxData[tid] = idxData[tid+64]; } } __syncthreads(); } volatile double *vdata = sdata; volatile int *vidxData = idxData; if (tid < 32) { if (blockSize >= 64) if (myMax < vdata[tid + 32]) { vdata[tid] = myMax = vdata[tid+32]; vidxData[tid] = vidxData[tid+32]; } if (blockSize >= 32) if (myMax < vdata[tid + 16]) { vdata[tid] = myMax = vdata[tid+16]; vidxData[tid] = vidxData[tid+16]; } if (blockSize >= 16) if (myMax < vdata[tid + 8]) { vdata[tid] = myMax = vdata[tid+8]; vidxData[tid] = vidxData[tid+8]; } if (blockSize >= 8) if (myMax < vdata[tid + 4]) { vdata[tid] = myMax = vdata[tid+4]; vidxData[tid] = vidxData[tid+4]; } if (blockSize >= 4) if (myMax < vdata[tid+2]) { vdata[tid] = myMax = vdata[tid+2]; vidxData[tid] = vidxData[tid+2]; } if (blockSize >= 2) if (myMax < vdata[tid + 1]) { vdata[tid] = myMax = vdata[tid+1]; vidxData[tid] = vidxData[tid+1]; } __syncthreads(); } if (tid == 0) { g_odata[blockIdx.x] = sdata[0]; max_idx[blockIdx.x] = idxData[0]; } } extern "C" __global__ void reduce_max_filter_final(double *g_idata, double *g_odata, int * max_idx, unsigned int n, unsigned int width, int blockSize, int *maxes, int nMax) { __shared__ int smaxesRow[10]; __shared__ int smaxesCol[10]; __shared__ int smaxesVal[10]; __shared__ double sdata[THREADS_PER_BLOCK]; __shared__ int idxData[THREADS_PER_BLOCK]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockSize*2) + tid; unsigned int gridSize = blockSize*2*gridDim.x; if (tid < nMax) { smaxesVal[tid] = maxes[tid]; smaxesRow[tid] = smaxesVal[tid] / width; smaxesCol[tid] = smaxesVal[tid] % width; 
} __syncthreads(); double myMax = 0.0; int myMaxIndex; while (i < n) { if (myMax < g_idata[i]) { if (checkDistance(smaxesRow, smaxesCol, nMax, max_idx[i], width)) { myMax = g_idata[i]; myMaxIndex = max_idx[i]; } } if (i+blockSize < n) { if (myMax < g_idata[i+blockSize]) { if (checkDistance(smaxesRow, smaxesCol, nMax, max_idx[i+blockSize], width)) { myMax = g_idata[i+blockSize]; myMaxIndex = max_idx[i+blockSize]; } } } i += gridSize; } sdata[tid] = myMax; idxData[tid] = myMaxIndex; __syncthreads(); if (blockSize >= 512) { if (tid < 256) { if (myMax < sdata[tid + 256]) { if (checkDistance(smaxesRow, smaxesCol, nMax, idxData[tid+256], width)) { sdata[tid] = myMax = sdata[tid+256]; idxData[tid] = idxData[tid+256]; } } } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { if (myMax < sdata[tid + 128]) { if (checkDistance(smaxesRow, smaxesCol, nMax, idxData[tid+128], width)) { sdata[tid] = myMax = sdata[tid+128]; idxData[tid] = idxData[tid+128]; } } } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { if(myMax < sdata[tid + 64]) { if (checkDistance(smaxesRow, smaxesCol, nMax, idxData[tid+64], width)) { sdata[tid] = myMax = sdata[tid+64]; idxData[tid] = idxData[tid+64]; } } } __syncthreads(); } volatile double *vdata = sdata; volatile int *vidxData = idxData; volatile int *vsmaxesRow = smaxesRow; volatile int *vsmaxesCol = smaxesCol; if (tid < 32) { if (blockSize >= 64) if (myMax < vdata[tid + 32]) { if (checkDistance(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+32], width)) { vdata[tid] = myMax = vdata[tid+32]; vidxData[tid] = vidxData[tid+32]; } } if (blockSize >= 32) if (myMax < vdata[tid + 16]) { if (checkDistance(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+16], width)) { vdata[tid] = myMax = vdata[tid+16]; vidxData[tid] = vidxData[tid+16]; } } if (blockSize >= 16) if (myMax < vdata[tid + 8]) { if (checkDistance(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+8], width)) { vdata[tid] = myMax = vdata[tid+8]; vidxData[tid] = vidxData[tid+8]; } } if (blockSize >= 
8) if (myMax < vdata[tid + 4]) { if (checkDistance(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+4], width)) { vdata[tid] = myMax = vdata[tid+4]; vidxData[tid] = vidxData[tid+4]; } } if (blockSize >= 4) if (myMax < vdata[tid+2]) { if (checkDistance(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+2], width)) { vdata[tid] = myMax = vdata[tid+2]; vidxData[tid] = vidxData[tid+2]; } } if (blockSize >= 2) if (myMax < vdata[tid + 1]) { if (checkDistance(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+1], width)) { vdata[tid] = myMax = vdata[tid+1]; vidxData[tid] = vidxData[tid+1]; } } __syncthreads(); } if (tid == 0) { g_odata[blockIdx.x] = sdata[0]; max_idx[blockIdx.x] = idxData[0]; if (gridDim.x == 1) maxes[nMax] = idxData[0]; } } extern "C" __global__ void reduce_max_filter_main(double *g_idata, double *g_odata, int * max_idx, unsigned int width, unsigned int height, int blockSize, int *maxes, int nMax) { __shared__ int smaxesRow[10]; __shared__ int smaxesCol[10]; __shared__ int smaxesVal[10]; __shared__ double sdata[THREADS_PER_BLOCK]; __shared__ int idxData[THREADS_PER_BLOCK]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockSize) + tid; unsigned int gridSize = blockSize*gridDim.x; if (tid < nMax) { smaxesVal[tid] = maxes[tid]; smaxesRow[tid] = smaxesVal[tid] / width; smaxesCol[tid] = smaxesVal[tid] % width; } __syncthreads(); double myMax = -INFINITY; int myMaxIndex; double val; while (i < width * height) { val = g_idata[i]; if (myMax < val) { // compute distance . . . 
if (checkDistance(smaxesRow, smaxesCol, nMax, i, width)) { myMax = val; myMaxIndex = i; } } if (i+blockSize < width * height) { val = g_idata[i+blockSize]; if (myMax < val) { if (checkDistance(smaxesRow, smaxesCol, nMax, i+blockSize, width)) { myMax = val; myMaxIndex = i+blockSize; } } } i += gridSize; } sdata[tid] = myMax; idxData[tid] = myMaxIndex; __syncthreads(); if (blockSize >= 512) { if (tid < 256) { if (myMax < sdata[tid + 256]) { if (checkDistance(smaxesRow, smaxesCol, nMax, idxData[tid+256], width)) { sdata[tid] = myMax = sdata[tid+256]; idxData[tid] = idxData[tid+256]; } } } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { if (myMax < sdata[tid + 128]) { if (checkDistance(smaxesRow, smaxesCol, nMax, idxData[tid+128], width)) { sdata[tid] = myMax = sdata[tid+128]; idxData[tid] = idxData[tid+128]; } } } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { if(myMax < sdata[tid + 64]) { if (checkDistance(smaxesRow, smaxesCol, nMax, idxData[tid+64], width)) { sdata[tid] = myMax = sdata[tid+64]; idxData[tid] = idxData[tid+64]; } } } __syncthreads(); } volatile double *vdata = sdata; volatile int *vidxData = idxData; volatile int *vsmaxesRow = smaxesRow; volatile int *vsmaxesCol = smaxesCol; if (tid < 32) { if (blockSize >= 64) if (myMax < vdata[tid + 32]) { if (checkDistance(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+32], width)) { vdata[tid] = myMax = vdata[tid+32]; vidxData[tid] = vidxData[tid+32]; } } if (blockSize >= 32) if (myMax < vdata[tid + 16]) { if (checkDistance(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+16], width)) { vdata[tid] = myMax = vdata[tid+16]; vidxData[tid] = vidxData[tid+16]; } } if (blockSize >= 16) if (myMax < vdata[tid + 8]) { if (checkDistance(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+8], width)) { vdata[tid] = myMax = vdata[tid+8]; vidxData[tid] = vidxData[tid+8]; } } if (blockSize >= 8) if (myMax < vdata[tid + 4]) { if (checkDistance(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+4], width)) { vdata[tid] = myMax = 
vdata[tid+4]; vidxData[tid] = vidxData[tid+4]; } } if (blockSize >= 4) if (myMax < vdata[tid+2]) { if (checkDistance(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+2], width)) { vdata[tid] = myMax = vdata[tid+2]; vidxData[tid] = vidxData[tid+2]; } } if (blockSize >= 2) if (myMax < vdata[tid + 1]) { if (checkDistance(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+1], width)) { vdata[tid] = myMax = vdata[tid+1]; vidxData[tid] = vidxData[tid+1]; } } __syncthreads(); } if (tid == 0) { g_odata[blockIdx.x] = sdata[0]; max_idx[blockIdx.x] = idxData[0]; if (gridDim.x == 1) maxes[nMax] = idxData[0]; } } // ================================================================ // ================================================================ // ================================================================ // ================================================================ // ======================= Float versions ========================= // ================================================================ // ================================================================ // ================================================================ // ================================================================ // ================================================================ __device__ float distancef(int x1, int x2, int y1, int y2) { return ((float(x1-x2))*(float(x1-x2)))+ ((float(y1-y2))*(float(y1-y2))); } __device__ bool checkDistancef(int *maxesRow, int *maxesCol, int nMax, int curIdx, int width) { int row = curIdx / width; int col = curIdx % width; int j; for (j = 0; j < nMax; j++) { if (maxesRow[j] == row && maxesCol[j] == col) return false; //dist = distance(maxesRow[j], row, maxesCol[j], col); //if (dist < MIN_DISTANCE) // return false; } return true; } __device__ bool checkDistancef(volatile int *maxesRow, volatile int *maxesCol, int nMax, int curIdx, int width) { int row = curIdx / width; int col = curIdx % width; int j; for (j = 0; j < nMax; j++) { if (maxesRow[j] == row && 
maxesCol[j] == col) return false; } return true; } extern "C" __global__ void elt_prod_conjf(hipfftComplex *fc, hipfftComplex * c1, hipfftComplex * c2, int size) { __shared__ hipfftComplex sfc[THREADS_PER_BLOCK]; __shared__ hipfftComplex sc1[THREADS_PER_BLOCK]; __shared__ hipfftComplex sc2[THREADS_PER_BLOCK]; int idx = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK; if (idx >= size) return; sc1[threadIdx.x] = c1[idx]; sc2[threadIdx.x] = c2[idx]; __syncthreads(); sfc[threadIdx.x] = hipCmulf(sc1[threadIdx.x], hipConjf(sc2[threadIdx.x])); float mag = hipCabsf(sfc[threadIdx.x]); if (mag == 0 || isnan(mag)) { mag = FLT_EPSILON; sfc[threadIdx.x].x = FLT_EPSILON; } fc[idx] = make_hipComplex(hipCrealf(sfc[threadIdx.x]) / mag, hipCimagf(sfc[threadIdx.x]) / mag); } extern "C" __global__ void elt_prod_conj_v2f(hipfftComplex *fc, hipfftComplex * c1, hipfftComplex * c2, int size) { __shared__ hipfftComplex sfc[THREADS_PER_BLOCK]; int idx = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK; if (idx >= size) return; //cufftDoubleComplex fc_res; sfc[threadIdx.x] = hipCmulf(c1[idx], hipConjf(c2[idx])); __syncthreads(); float mag; // mag = sqrt(fc_res.x * fc_res.x + fc_res.y * fc_res.y); mag = sqrtf(sfc[threadIdx.x].x * sfc[threadIdx.x].x + sfc[threadIdx.x].y * sfc[threadIdx.x].y); if (isnan(mag) || mag == 0) { mag = FLT_EPSILON; //cuCabs(sfc[threadIdx.x]); sfc[threadIdx.x].x = FLT_EPSILON; } // if (mag == 0) // mag = DBL_EPSILON; fc[idx] = make_hipComplex(sfc[threadIdx.x].x / mag, sfc[threadIdx.x].y / mag); } extern "C" __global__ void elt_prod_conj_v3f(hipfftComplex *fc, hipfftComplex * c1, hipfftComplex *c2, int size) { int idx = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK; if (idx >= size) return; hipfftComplex _c1 = c1[idx]; hipfftComplex _c2 = c2[idx]; hipfftComplex _fc = hipCmulf(_c1, hipConjf(_c2)); float mag = sqrtf(_fc.x * _fc.x + _fc.y * _fc.y); if (isnan(mag) || mag == 0) mag = hipCabsf(_fc); if (mag == 0) mag = FLT_EPSILON; fc[idx] = make_hipComplex(_fc.x / mag, _fc.y / mag); 
} extern "C" __global__ void reduce_max_finalf(float *g_idata, float *g_odata, int * max_idx, unsigned int n, int blockSize) { __shared__ float sdata[THREADS_PER_BLOCK]; __shared__ int idxData[THREADS_PER_BLOCK]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockSize*2) + tid; unsigned int gridSize = blockSize*2*gridDim.x; float myMax = 0.0; int myMaxIndex; while (i < n) { if (myMax < g_idata[i]) { myMax = g_idata[i]; myMaxIndex = max_idx[i]; } if (i+blockSize < n) { if (myMax < g_idata[i+blockSize]) { myMax = g_idata[i+blockSize]; myMaxIndex = max_idx[i+blockSize]; } } i += gridSize; } sdata[tid] = myMax; idxData[tid] = myMaxIndex; __syncthreads(); if (blockSize >= 512) { if (tid < 256) { if (myMax < sdata[tid + 256]) { sdata[tid] = myMax = sdata[tid+256]; idxData[tid] = idxData[tid+256]; } } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { if (myMax < sdata[tid + 128]) { sdata[tid] = myMax = sdata[tid+128]; idxData[tid] = idxData[tid+128]; } } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { if(myMax < sdata[tid + 64]) { sdata[tid] = myMax = sdata[tid+64]; idxData[tid] = idxData[tid+64]; } } __syncthreads(); } volatile float *vdata = sdata; volatile int *vidxData = idxData; if (tid < 32) { if (blockSize >= 64) if (myMax < vdata[tid + 32]) { vdata[tid] = myMax = vdata[tid+32]; vidxData[tid] = vidxData[tid+32]; } if (blockSize >= 32) if (myMax < vdata[tid + 16]) { vdata[tid] = myMax = vdata[tid+16]; vidxData[tid] = vidxData[tid+16]; } if (blockSize >= 16) if (myMax < vdata[tid + 8]) { vdata[tid] = myMax = vdata[tid+8]; vidxData[tid] = vidxData[tid+8]; } if (blockSize >= 8) if (myMax < vdata[tid + 4]) { vdata[tid] = myMax = vdata[tid+4]; vidxData[tid] = vidxData[tid+4]; } if (blockSize >= 4) if (myMax < vdata[tid+2]) { vdata[tid] = myMax = vdata[tid+2]; vidxData[tid] = vidxData[tid+2]; } if (blockSize >= 2) if (myMax < vdata[tid + 1]) { vdata[tid] = myMax = vdata[tid+1]; vidxData[tid] = vidxData[tid+1]; } __syncthreads(); } if 
(tid == 0) { g_odata[blockIdx.x] = sdata[0]; max_idx[blockIdx.x] = idxData[0]; } } extern "C" __global__ void reduce_max_mainf(float *g_idata, float *g_odata, int * max_idx, unsigned int n, int blockSize) { __shared__ float sdata[THREADS_PER_BLOCK]; __shared__ int idxData[THREADS_PER_BLOCK]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockSize) + tid; unsigned int gridSize = blockSize*gridDim.x; float myMax = 0.0; int myMaxIndex; float val; while (i < n) { val = g_idata[i]; if (myMax < val) { myMax = val; myMaxIndex = i; } if (i+blockSize < n) { val = g_idata[i+blockSize]; if (myMax < val) { myMax = val; myMaxIndex = i+blockSize; } } i += gridSize; } sdata[tid] = myMax; idxData[tid] = myMaxIndex; __syncthreads(); if (blockSize >= 512) { if (tid < 256) { if (myMax < sdata[tid + 256]) { sdata[tid] = myMax = sdata[tid+256]; idxData[tid] = idxData[tid+256]; } } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { if (myMax < sdata[tid + 128]) { sdata[tid] = myMax = sdata[tid+128]; idxData[tid] = idxData[tid+128]; } } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { if(myMax < sdata[tid + 64]) { sdata[tid] = myMax = sdata[tid+64]; idxData[tid] = idxData[tid+64]; } } __syncthreads(); } volatile float *vdata = sdata; volatile int *vidxData = idxData; if (tid < 32) { if (blockSize >= 64) if (myMax < vdata[tid + 32]) { vdata[tid] = myMax = vdata[tid+32]; vidxData[tid] = vidxData[tid+32]; } if (blockSize >= 32) if (myMax < vdata[tid + 16]) { vdata[tid] = myMax = vdata[tid+16]; vidxData[tid] = vidxData[tid+16]; } if (blockSize >= 16) if (myMax < vdata[tid + 8]) { vdata[tid] = myMax = vdata[tid+8]; vidxData[tid] = vidxData[tid+8]; } if (blockSize >= 8) if (myMax < vdata[tid + 4]) { vdata[tid] = myMax = vdata[tid+4]; vidxData[tid] = vidxData[tid+4]; } if (blockSize >= 4) if (myMax < vdata[tid+2]) { vdata[tid] = myMax = vdata[tid+2]; vidxData[tid] = vidxData[tid+2]; } if (blockSize >= 2) if (myMax < vdata[tid + 1]) { vdata[tid] = myMax = 
vdata[tid+1]; vidxData[tid] = vidxData[tid+1]; } __syncthreads(); } if (tid == 0) { g_odata[blockIdx.x] = sdata[0]; max_idx[blockIdx.x] = idxData[0]; } } extern "C" __global__ void reduce_max_filter_finalf(float *g_idata, float *g_odata, int * max_idx, unsigned int n, unsigned int width, int blockSize, int *maxes, int nMax) { __shared__ int smaxesRow[10]; __shared__ int smaxesCol[10]; __shared__ int smaxesVal[10]; __shared__ float sdata[THREADS_PER_BLOCK]; __shared__ int idxData[THREADS_PER_BLOCK]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockSize*2) + tid; unsigned int gridSize = blockSize*2*gridDim.x; if (tid < nMax) { smaxesVal[tid] = maxes[tid]; smaxesRow[tid] = smaxesVal[tid] / width; smaxesCol[tid] = smaxesVal[tid] % width; } __syncthreads(); float myMax = 0.0; int myMaxIndex; while (i < n) { if (myMax < g_idata[i]) { if (checkDistancef(smaxesRow, smaxesCol, nMax, max_idx[i], width)) { myMax = g_idata[i]; myMaxIndex = max_idx[i]; } } if (i+blockSize < n) { if (myMax < g_idata[i+blockSize]) { if (checkDistancef(smaxesRow, smaxesCol, nMax, max_idx[i+blockSize], width)) { myMax = g_idata[i+blockSize]; myMaxIndex = max_idx[i+blockSize]; } } } i += gridSize; } sdata[tid] = myMax; idxData[tid] = myMaxIndex; __syncthreads(); if (blockSize >= 512) { if (tid < 256) { if (myMax < sdata[tid + 256]) { if (checkDistancef(smaxesRow, smaxesCol, nMax, idxData[tid+256], width)) { sdata[tid] = myMax = sdata[tid+256]; idxData[tid] = idxData[tid+256]; } } } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { if (myMax < sdata[tid + 128]) { if (checkDistancef(smaxesRow, smaxesCol, nMax, idxData[tid+128], width)) { sdata[tid] = myMax = sdata[tid+128]; idxData[tid] = idxData[tid+128]; } } } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { if(myMax < sdata[tid + 64]) { if (checkDistancef(smaxesRow, smaxesCol, nMax, idxData[tid+64], width)) { sdata[tid] = myMax = sdata[tid+64]; idxData[tid] = idxData[tid+64]; } } } __syncthreads(); } volatile 
float *vdata = sdata; volatile int *vidxData = idxData; volatile int *vsmaxesRow = smaxesRow; volatile int *vsmaxesCol = smaxesCol; if (tid < 32) { if (blockSize >= 64) if (myMax < vdata[tid + 32]) { if (checkDistancef(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+32], width)) { vdata[tid] = myMax = vdata[tid+32]; vidxData[tid] = vidxData[tid+32]; } } if (blockSize >= 32) if (myMax < vdata[tid + 16]) { if (checkDistancef(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+16], width)) { vdata[tid] = myMax = vdata[tid+16]; vidxData[tid] = vidxData[tid+16]; } } if (blockSize >= 16) if (myMax < vdata[tid + 8]) { if (checkDistancef(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+8], width)) { vdata[tid] = myMax = vdata[tid+8]; vidxData[tid] = vidxData[tid+8]; } } if (blockSize >= 8) if (myMax < vdata[tid + 4]) { if (checkDistancef(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+4], width)) { vdata[tid] = myMax = vdata[tid+4]; vidxData[tid] = vidxData[tid+4]; } } if (blockSize >= 4) if (myMax < vdata[tid+2]) { if (checkDistancef(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+2], width)) { vdata[tid] = myMax = vdata[tid+2]; vidxData[tid] = vidxData[tid+2]; } } if (blockSize >= 2) if (myMax < vdata[tid + 1]) { if (checkDistancef(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+1], width)) { vdata[tid] = myMax = vdata[tid+1]; vidxData[tid] = vidxData[tid+1]; } } __syncthreads(); } if (tid == 0) { g_odata[blockIdx.x] = sdata[0]; max_idx[blockIdx.x] = idxData[0]; if (gridDim.x == 1) maxes[nMax] = idxData[0]; } } extern "C" __global__ void reduce_max_filter_mainf(float *g_idata, float *g_odata, int * max_idx, unsigned int width, unsigned int height, int blockSize, int *maxes, int nMax) { __shared__ int smaxesRow[10]; __shared__ int smaxesCol[10]; __shared__ int smaxesVal[10]; __shared__ float sdata[THREADS_PER_BLOCK]; __shared__ int idxData[THREADS_PER_BLOCK]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockSize) + tid; unsigned int gridSize = blockSize*gridDim.x; if (tid < nMax) { 
smaxesVal[tid] = maxes[tid]; smaxesRow[tid] = smaxesVal[tid] / width; smaxesCol[tid] = smaxesVal[tid] % width; } __syncthreads(); float myMax = -INFINITY; int myMaxIndex; float val; while (i < width * height) { val = g_idata[i]; if (myMax < val) { // compute distance . . . if (checkDistancef(smaxesRow, smaxesCol, nMax, i, width)) { myMax = val; myMaxIndex = i; } } if (i+blockSize < width * height) { val = g_idata[i+blockSize]; if (myMax < val) { if (checkDistancef(smaxesRow, smaxesCol, nMax, i+blockSize, width)) { myMax = val; myMaxIndex = i+blockSize; } } } i += gridSize; } sdata[tid] = myMax; idxData[tid] = myMaxIndex; __syncthreads(); if (blockSize >= 512) { if (tid < 256) { if (myMax < sdata[tid + 256]) { if (checkDistancef(smaxesRow, smaxesCol, nMax, idxData[tid+256], width)) { sdata[tid] = myMax = sdata[tid+256]; idxData[tid] = idxData[tid+256]; } } } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { if (myMax < sdata[tid + 128]) { if (checkDistancef(smaxesRow, smaxesCol, nMax, idxData[tid+128], width)) { sdata[tid] = myMax = sdata[tid+128]; idxData[tid] = idxData[tid+128]; } } } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { if(myMax < sdata[tid + 64]) { if (checkDistancef(smaxesRow, smaxesCol, nMax, idxData[tid+64], width)) { sdata[tid] = myMax = sdata[tid+64]; idxData[tid] = idxData[tid+64]; } } } __syncthreads(); } volatile float *vdata = sdata; volatile int *vidxData = idxData; volatile int *vsmaxesRow = smaxesRow; volatile int *vsmaxesCol = smaxesCol; if (tid < 32) { if (blockSize >= 64) if (myMax < vdata[tid + 32]) { if (checkDistancef(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+32], width)) { vdata[tid] = myMax = vdata[tid+32]; vidxData[tid] = vidxData[tid+32]; } } if (blockSize >= 32) if (myMax < vdata[tid + 16]) { if (checkDistancef(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+16], width)) { vdata[tid] = myMax = vdata[tid+16]; vidxData[tid] = vidxData[tid+16]; } } if (blockSize >= 16) if (myMax < vdata[tid + 8]) { if 
(checkDistancef(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+8], width)) { vdata[tid] = myMax = vdata[tid+8]; vidxData[tid] = vidxData[tid+8]; } } if (blockSize >= 8) if (myMax < vdata[tid + 4]) { if (checkDistancef(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+4], width)) { vdata[tid] = myMax = vdata[tid+4]; vidxData[tid] = vidxData[tid+4]; } } if (blockSize >= 4) if (myMax < vdata[tid+2]) { if (checkDistancef(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+2], width)) { vdata[tid] = myMax = vdata[tid+2]; vidxData[tid] = vidxData[tid+2]; } } if (blockSize >= 2) if (myMax < vdata[tid + 1]) { if (checkDistancef(vsmaxesRow, vsmaxesCol, nMax, vidxData[tid+1], width)) { vdata[tid] = myMax = vdata[tid+1]; vidxData[tid] = vidxData[tid+1]; } } __syncthreads(); } if (tid == 0) { g_odata[blockIdx.x] = sdata[0]; max_idx[blockIdx.x] = idxData[0]; if (gridDim.x == 1) maxes[nMax] = idxData[0]; } } // ================================================================ // Local Variables: // time-stamp-line-limit: 30 // End:
.text .file "util-cuda-bin.hip" .globl __device_stub__elt_prod_conj # -- Begin function __device_stub__elt_prod_conj .p2align 4, 0x90 .type __device_stub__elt_prod_conj,@function __device_stub__elt_prod_conj: # @__device_stub__elt_prod_conj .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $elt_prod_conj, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size __device_stub__elt_prod_conj, .Lfunc_end0-__device_stub__elt_prod_conj .cfi_endproc # -- End function .globl __device_stub__elt_prod_conj_v2 # -- Begin function __device_stub__elt_prod_conj_v2 .p2align 4, 0x90 .type __device_stub__elt_prod_conj_v2,@function __device_stub__elt_prod_conj_v2: # @__device_stub__elt_prod_conj_v2 .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $elt_prod_conj_v2, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end1: .size 
__device_stub__elt_prod_conj_v2, .Lfunc_end1-__device_stub__elt_prod_conj_v2 .cfi_endproc # -- End function .globl __device_stub__elt_prod_conj_v3 # -- Begin function __device_stub__elt_prod_conj_v3 .p2align 4, 0x90 .type __device_stub__elt_prod_conj_v3,@function __device_stub__elt_prod_conj_v3: # @__device_stub__elt_prod_conj_v3 .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $elt_prod_conj_v3, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end2: .size __device_stub__elt_prod_conj_v3, .Lfunc_end2-__device_stub__elt_prod_conj_v3 .cfi_endproc # -- End function .globl __device_stub__reduce_max_final # -- Begin function __device_stub__reduce_max_final .p2align 4, 0x90 .type __device_stub__reduce_max_final,@function __device_stub__reduce_max_final: # @__device_stub__reduce_max_final .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) movl %r8d, (%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) movq %rsp, %rax movq %rax, 112(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $reduce_max_final, %edi pushq 8(%rsp) 
.cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end3: .size __device_stub__reduce_max_final, .Lfunc_end3-__device_stub__reduce_max_final .cfi_endproc # -- End function .globl __device_stub__reduce_max_main # -- Begin function __device_stub__reduce_max_main .p2align 4, 0x90 .type __device_stub__reduce_max_main,@function __device_stub__reduce_max_main: # @__device_stub__reduce_max_main .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) movl %r8d, (%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) movq %rsp, %rax movq %rax, 112(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $reduce_max_main, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end4: .size __device_stub__reduce_max_main, .Lfunc_end4-__device_stub__reduce_max_main .cfi_endproc # -- End function .globl __device_stub__reduce_max_filter_final # -- Begin function __device_stub__reduce_max_filter_final .p2align 4, 0x90 .type __device_stub__reduce_max_filter_final,@function __device_stub__reduce_max_filter_final: # @__device_stub__reduce_max_filter_final .cfi_startproc # %bb.0: subq $168, %rsp .cfi_def_cfa_offset 176 movq %rdi, 88(%rsp) movq %rsi, 80(%rsp) movq %rdx, 72(%rsp) movl %ecx, 20(%rsp) movl %r8d, 16(%rsp) movl %r9d, 12(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 20(%rsp), %rax movq %rax, 120(%rsp) leaq 16(%rsp), %rax movq %rax, 128(%rsp) 
leaq 12(%rsp), %rax movq %rax, 136(%rsp) leaq 176(%rsp), %rax movq %rax, 144(%rsp) leaq 184(%rsp), %rax movq %rax, 152(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d leaq 96(%rsp), %r9 movl $reduce_max_filter_final, %edi pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $184, %rsp .cfi_adjust_cfa_offset -184 retq .Lfunc_end5: .size __device_stub__reduce_max_filter_final, .Lfunc_end5-__device_stub__reduce_max_filter_final .cfi_endproc # -- End function .globl __device_stub__reduce_max_filter_main # -- Begin function __device_stub__reduce_max_filter_main .p2align 4, 0x90 .type __device_stub__reduce_max_filter_main,@function __device_stub__reduce_max_filter_main: # @__device_stub__reduce_max_filter_main .cfi_startproc # %bb.0: subq $168, %rsp .cfi_def_cfa_offset 176 movq %rdi, 88(%rsp) movq %rsi, 80(%rsp) movq %rdx, 72(%rsp) movl %ecx, 20(%rsp) movl %r8d, 16(%rsp) movl %r9d, 12(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 20(%rsp), %rax movq %rax, 120(%rsp) leaq 16(%rsp), %rax movq %rax, 128(%rsp) leaq 12(%rsp), %rax movq %rax, 136(%rsp) leaq 176(%rsp), %rax movq %rax, 144(%rsp) leaq 184(%rsp), %rax movq %rax, 152(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d leaq 96(%rsp), %r9 movl $reduce_max_filter_main, %edi pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $184, %rsp .cfi_adjust_cfa_offset -184 retq .Lfunc_end6: .size __device_stub__reduce_max_filter_main, .Lfunc_end6-__device_stub__reduce_max_filter_main .cfi_endproc # -- End function .globl 
__device_stub__elt_prod_conjf # -- Begin function __device_stub__elt_prod_conjf .p2align 4, 0x90 .type __device_stub__elt_prod_conjf,@function __device_stub__elt_prod_conjf: # @__device_stub__elt_prod_conjf .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $elt_prod_conjf, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end7: .size __device_stub__elt_prod_conjf, .Lfunc_end7-__device_stub__elt_prod_conjf .cfi_endproc # -- End function .globl __device_stub__elt_prod_conj_v2f # -- Begin function __device_stub__elt_prod_conj_v2f .p2align 4, 0x90 .type __device_stub__elt_prod_conj_v2f,@function __device_stub__elt_prod_conj_v2f: # @__device_stub__elt_prod_conj_v2f .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $elt_prod_conj_v2f, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end8: .size __device_stub__elt_prod_conj_v2f, 
.Lfunc_end8-__device_stub__elt_prod_conj_v2f .cfi_endproc # -- End function .globl __device_stub__elt_prod_conj_v3f # -- Begin function __device_stub__elt_prod_conj_v3f .p2align 4, 0x90 .type __device_stub__elt_prod_conj_v3f,@function __device_stub__elt_prod_conj_v3f: # @__device_stub__elt_prod_conj_v3f .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $elt_prod_conj_v3f, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end9: .size __device_stub__elt_prod_conj_v3f, .Lfunc_end9-__device_stub__elt_prod_conj_v3f .cfi_endproc # -- End function .globl __device_stub__reduce_max_finalf # -- Begin function __device_stub__reduce_max_finalf .p2align 4, 0x90 .type __device_stub__reduce_max_finalf,@function __device_stub__reduce_max_finalf: # @__device_stub__reduce_max_finalf .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) movl %r8d, (%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) movq %rsp, %rax movq %rax, 112(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $reduce_max_finalf, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 
24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end10: .size __device_stub__reduce_max_finalf, .Lfunc_end10-__device_stub__reduce_max_finalf .cfi_endproc # -- End function .globl __device_stub__reduce_max_mainf # -- Begin function __device_stub__reduce_max_mainf .p2align 4, 0x90 .type __device_stub__reduce_max_mainf,@function __device_stub__reduce_max_mainf: # @__device_stub__reduce_max_mainf .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) movl %r8d, (%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) movq %rsp, %rax movq %rax, 112(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $reduce_max_mainf, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end11: .size __device_stub__reduce_max_mainf, .Lfunc_end11-__device_stub__reduce_max_mainf .cfi_endproc # -- End function .globl __device_stub__reduce_max_filter_finalf # -- Begin function __device_stub__reduce_max_filter_finalf .p2align 4, 0x90 .type __device_stub__reduce_max_filter_finalf,@function __device_stub__reduce_max_filter_finalf: # @__device_stub__reduce_max_filter_finalf .cfi_startproc # %bb.0: subq $168, %rsp .cfi_def_cfa_offset 176 movq %rdi, 88(%rsp) movq %rsi, 80(%rsp) movq %rdx, 72(%rsp) movl %ecx, 20(%rsp) movl %r8d, 16(%rsp) movl %r9d, 12(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 20(%rsp), %rax movq %rax, 120(%rsp) leaq 16(%rsp), %rax movq %rax, 128(%rsp) leaq 
12(%rsp), %rax movq %rax, 136(%rsp) leaq 176(%rsp), %rax movq %rax, 144(%rsp) leaq 184(%rsp), %rax movq %rax, 152(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d leaq 96(%rsp), %r9 movl $reduce_max_filter_finalf, %edi pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $184, %rsp .cfi_adjust_cfa_offset -184 retq .Lfunc_end12: .size __device_stub__reduce_max_filter_finalf, .Lfunc_end12-__device_stub__reduce_max_filter_finalf .cfi_endproc # -- End function .globl __device_stub__reduce_max_filter_mainf # -- Begin function __device_stub__reduce_max_filter_mainf .p2align 4, 0x90 .type __device_stub__reduce_max_filter_mainf,@function __device_stub__reduce_max_filter_mainf: # @__device_stub__reduce_max_filter_mainf .cfi_startproc # %bb.0: subq $168, %rsp .cfi_def_cfa_offset 176 movq %rdi, 88(%rsp) movq %rsi, 80(%rsp) movq %rdx, 72(%rsp) movl %ecx, 20(%rsp) movl %r8d, 16(%rsp) movl %r9d, 12(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 20(%rsp), %rax movq %rax, 120(%rsp) leaq 16(%rsp), %rax movq %rax, 128(%rsp) leaq 12(%rsp), %rax movq %rax, 136(%rsp) leaq 176(%rsp), %rax movq %rax, 144(%rsp) leaq 184(%rsp), %rax movq %rax, 152(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d leaq 96(%rsp), %r9 movl $reduce_max_filter_mainf, %edi pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $184, %rsp .cfi_adjust_cfa_offset -184 retq .Lfunc_end13: .size __device_stub__reduce_max_filter_mainf, .Lfunc_end13-__device_stub__reduce_max_filter_mainf .cfi_endproc # -- End function .p2align 4, 0x90 # -- 
Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB14_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB14_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $elt_prod_conj, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $elt_prod_conj_v2, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $elt_prod_conj_v3, %esi movl $.L__unnamed_3, %edx movl $.L__unnamed_3, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $reduce_max_final, %esi movl $.L__unnamed_4, %edx movl $.L__unnamed_4, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $reduce_max_main, %esi movl $.L__unnamed_5, %edx movl $.L__unnamed_5, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $reduce_max_filter_final, %esi movl $.L__unnamed_6, %edx movl $.L__unnamed_6, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $reduce_max_filter_main, %esi movl $.L__unnamed_7, %edx movl $.L__unnamed_7, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups 
%xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $elt_prod_conjf, %esi movl $.L__unnamed_8, %edx movl $.L__unnamed_8, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $elt_prod_conj_v2f, %esi movl $.L__unnamed_9, %edx movl $.L__unnamed_9, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $elt_prod_conj_v3f, %esi movl $.L__unnamed_10, %edx movl $.L__unnamed_10, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $reduce_max_finalf, %esi movl $.L__unnamed_11, %edx movl $.L__unnamed_11, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $reduce_max_mainf, %esi movl $.L__unnamed_12, %edx movl $.L__unnamed_12, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $reduce_max_filter_finalf, %esi movl $.L__unnamed_13, %edx movl $.L__unnamed_13, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $reduce_max_filter_mainf, %esi movl $.L__unnamed_14, %edx movl $.L__unnamed_14, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end14: .size __hip_module_ctor, .Lfunc_end14-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB15_2 # %bb.1: pushq %rax 
.cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB15_2: retq .Lfunc_end15: .size __hip_module_dtor, .Lfunc_end15-__hip_module_dtor .cfi_endproc # -- End function .type elt_prod_conj,@object # @elt_prod_conj .section .rodata,"a",@progbits .globl elt_prod_conj .p2align 3, 0x0 elt_prod_conj: .quad __device_stub__elt_prod_conj .size elt_prod_conj, 8 .type elt_prod_conj_v2,@object # @elt_prod_conj_v2 .globl elt_prod_conj_v2 .p2align 3, 0x0 elt_prod_conj_v2: .quad __device_stub__elt_prod_conj_v2 .size elt_prod_conj_v2, 8 .type elt_prod_conj_v3,@object # @elt_prod_conj_v3 .globl elt_prod_conj_v3 .p2align 3, 0x0 elt_prod_conj_v3: .quad __device_stub__elt_prod_conj_v3 .size elt_prod_conj_v3, 8 .type reduce_max_final,@object # @reduce_max_final .globl reduce_max_final .p2align 3, 0x0 reduce_max_final: .quad __device_stub__reduce_max_final .size reduce_max_final, 8 .type reduce_max_main,@object # @reduce_max_main .globl reduce_max_main .p2align 3, 0x0 reduce_max_main: .quad __device_stub__reduce_max_main .size reduce_max_main, 8 .type reduce_max_filter_final,@object # @reduce_max_filter_final .globl reduce_max_filter_final .p2align 3, 0x0 reduce_max_filter_final: .quad __device_stub__reduce_max_filter_final .size reduce_max_filter_final, 8 .type reduce_max_filter_main,@object # @reduce_max_filter_main .globl reduce_max_filter_main .p2align 3, 0x0 reduce_max_filter_main: .quad __device_stub__reduce_max_filter_main .size reduce_max_filter_main, 8 .type elt_prod_conjf,@object # @elt_prod_conjf .globl elt_prod_conjf .p2align 3, 0x0 elt_prod_conjf: .quad __device_stub__elt_prod_conjf .size elt_prod_conjf, 8 .type elt_prod_conj_v2f,@object # @elt_prod_conj_v2f .globl elt_prod_conj_v2f .p2align 3, 0x0 elt_prod_conj_v2f: .quad __device_stub__elt_prod_conj_v2f .size elt_prod_conj_v2f, 8 .type elt_prod_conj_v3f,@object # @elt_prod_conj_v3f .globl elt_prod_conj_v3f .p2align 3, 0x0 elt_prod_conj_v3f: .quad 
__device_stub__elt_prod_conj_v3f .size elt_prod_conj_v3f, 8 .type reduce_max_finalf,@object # @reduce_max_finalf .globl reduce_max_finalf .p2align 3, 0x0 reduce_max_finalf: .quad __device_stub__reduce_max_finalf .size reduce_max_finalf, 8 .type reduce_max_mainf,@object # @reduce_max_mainf .globl reduce_max_mainf .p2align 3, 0x0 reduce_max_mainf: .quad __device_stub__reduce_max_mainf .size reduce_max_mainf, 8 .type reduce_max_filter_finalf,@object # @reduce_max_filter_finalf .globl reduce_max_filter_finalf .p2align 3, 0x0 reduce_max_filter_finalf: .quad __device_stub__reduce_max_filter_finalf .size reduce_max_filter_finalf, 8 .type reduce_max_filter_mainf,@object # @reduce_max_filter_mainf .globl reduce_max_filter_mainf .p2align 3, 0x0 reduce_max_filter_mainf: .quad __device_stub__reduce_max_filter_mainf .size reduce_max_filter_mainf, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "elt_prod_conj" .size .L__unnamed_1, 14 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "elt_prod_conj_v2" .size .L__unnamed_2, 17 .type .L__unnamed_3,@object # @2 .L__unnamed_3: .asciz "elt_prod_conj_v3" .size .L__unnamed_3, 17 .type .L__unnamed_4,@object # @3 .L__unnamed_4: .asciz "reduce_max_final" .size .L__unnamed_4, 17 .type .L__unnamed_5,@object # @4 .L__unnamed_5: .asciz "reduce_max_main" .size .L__unnamed_5, 16 .type .L__unnamed_6,@object # @5 .L__unnamed_6: .asciz "reduce_max_filter_final" .size .L__unnamed_6, 24 .type .L__unnamed_7,@object # @6 .L__unnamed_7: .asciz "reduce_max_filter_main" .size .L__unnamed_7, 23 .type .L__unnamed_8,@object # @7 .L__unnamed_8: .asciz "elt_prod_conjf" .size .L__unnamed_8, 15 .type .L__unnamed_9,@object # @8 .L__unnamed_9: .asciz "elt_prod_conj_v2f" .size .L__unnamed_9, 18 .type .L__unnamed_10,@object # @9 .L__unnamed_10: .asciz "elt_prod_conj_v3f" .size .L__unnamed_10, 18 .type .L__unnamed_11,@object # @10 .L__unnamed_11: .asciz "reduce_max_finalf" .size .L__unnamed_11, 18 .type 
.L__unnamed_12,@object # @11 .L__unnamed_12: .asciz "reduce_max_mainf" .size .L__unnamed_12, 17 .type .L__unnamed_13,@object # @12 .L__unnamed_13: .asciz "reduce_max_filter_finalf" .size .L__unnamed_13, 25 .type .L__unnamed_14,@object # @13 .L__unnamed_14: .asciz "reduce_max_filter_mainf" .size .L__unnamed_14, 24 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __device_stub__elt_prod_conj .addrsig_sym __device_stub__elt_prod_conj_v2 .addrsig_sym __device_stub__elt_prod_conj_v3 .addrsig_sym __device_stub__reduce_max_final .addrsig_sym __device_stub__reduce_max_main .addrsig_sym __device_stub__reduce_max_filter_final .addrsig_sym __device_stub__reduce_max_filter_main .addrsig_sym __device_stub__elt_prod_conjf .addrsig_sym __device_stub__elt_prod_conj_v2f .addrsig_sym __device_stub__elt_prod_conj_v3f .addrsig_sym __device_stub__reduce_max_finalf .addrsig_sym __device_stub__reduce_max_mainf .addrsig_sym __device_stub__reduce_max_filter_finalf .addrsig_sym __device_stub__reduce_max_filter_mainf .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym elt_prod_conj .addrsig_sym elt_prod_conj_v2 .addrsig_sym elt_prod_conj_v3 .addrsig_sym reduce_max_final .addrsig_sym reduce_max_main .addrsig_sym 
reduce_max_filter_final .addrsig_sym reduce_max_filter_main .addrsig_sym elt_prod_conjf .addrsig_sym elt_prod_conj_v2f .addrsig_sym elt_prod_conj_v3f .addrsig_sym reduce_max_finalf .addrsig_sym reduce_max_mainf .addrsig_sym reduce_max_filter_finalf .addrsig_sym reduce_max_filter_mainf .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <time.h>
using namespace std;

// Matrix-vector product: c[row] = dot(mat[row, :], a) for an n x n row-major
// matrix. One thread per output row; expects a 1-D launch covering >= n threads.
__global__ void matrixVectorMultiplication(float *a, float *mat, float *c, int n)
{
    int row = threadIdx.x + blockDim.x * blockIdx.x;
    if (row < n) {
        // BUG FIX: the store was previously outside this guard, so threads
        // beyond n wrote c[row] out of bounds when the grid overshot n.
        float sum = 0.0f;
        for (int j = 0; j < n; j++)
            sum += mat[row * n + j] * a[j];
        c[row] = sum;
    }
}

int main()
{
    int n = 32 * 1024;

    float *a = (float*)malloc(sizeof(float) * n);      // input vector
    float *b = (float*)malloc(sizeof(float) * n * n);  // n x n matrix, row-major
    float *c = (float*)malloc(sizeof(float) * n);      // GPU result
    float *d = (float*)malloc(sizeof(float) * n);      // CPU reference

    // Initialize inputs.
    // BUG FIX: the original had `for(i=0;i<n;i++) a[i]=1.0; c[i]=1.0;` — the
    // second statement ran once AFTER the loop with i==n, writing past the end
    // of c. c needs no init: it is fully overwritten by the device-to-host copy.
    for (int i = 0; i < n; i++)
        a[i] = 1.0f;
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++)
            b[i * n + j] = 2.0f;

    printf("<<<<<<<<<< initial data:\n");

    float *dev_a, *dev_b, *dev_c;
    cudaMalloc((void**)&dev_a, sizeof(float) * n);
    cudaMalloc((void**)&dev_b, sizeof(float) * n * n);
    cudaMalloc((void**)&dev_c, sizeof(float) * n);

    cudaMemcpy(dev_a, a, sizeof(float) * n, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, sizeof(float) * n * n, cudaMemcpyHostToDevice);

    cudaEvent_t start, end;
    cudaEventCreate(&start);
    cudaEventCreate(&end);

    int threadsPerBlock = 32;
    // Ceil-div so any n is covered (the kernel's bounds check handles the tail).
    int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;

    cudaEventRecord(start);
    matrixVectorMultiplication<<<blocksPerGrid, threadsPerBlock>>>(dev_a, dev_b, dev_c, n);
    cudaEventRecord(end);
    cudaEventSynchronize(end);

    float time = 0.0f;
    cudaEventElapsedTime(&time, start, end);

    // Blocking copy: also synchronizes before c is read on the host.
    cudaMemcpy(c, dev_c, sizeof(float) * n, cudaMemcpyDeviceToHost);
    cout << "\nGPU Time Elapsed: " << time;

    // CPU reference.
    // BUG FIX: the original computed a[row*n+col]*b[col], indexing the
    // n-element vector a as if it were the matrix (reads far out of bounds).
    // The matrix is b; the vector is a. Also accumulate in float, not int.
    for (int row = 0; row < n; row++) {
        float sum = 0.0f;
        for (int col = 0; col < n; col++)
            sum += b[row * n + col] * a[col];
        d[row] = sum;
    }

    float error = 0.0f;
    for (int i = 0; i < n; i++)
        error += d[i] - c[i];
    cout << "Error : " << error;

    // Release resources (the original leaked everything).
    cudaEventDestroy(start);
    cudaEventDestroy(end);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    free(a);
    free(b);
    free(c);
    free(d);
    return 0;
}

// Fill an N-element array with 1.0f.
void init_array(float *a, const int N)
{
    for (int i = 0; i < N; i++)
        a[i] = 1.0f;
}

// Fill an N x M row-major matrix with 2.0f.
void init_mat(float *a, const int N, const int M)
{
    for (int i = 0; i < N; i++)
        for (int j = 0; j < M; j++)
            a[i * M + j] = 2.0f;
}
code for sm_80 Function : _Z26matrixVectorMultiplicationPfS_S_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e220000002500 */ /*0020*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff067624 */ /* 0x000fe200078e00ff */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*0040*/ BSSY B0, 0xa80 ; /* 0x00000a3000007945 */ /* 0x000fe20003800000 */ /*0050*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e220000002100 */ /*0060*/ HFMA2.MMA R15, -RZ, RZ, 0, 0 ; /* 0x00000000ff0f7435 */ /* 0x000fe200000001ff */ /*0070*/ ISETP.GE.AND P0, PT, R6, 0x1, PT ; /* 0x000000010600780c */ /* 0x000fe20003f06270 */ /*0080*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x001fca00078e0203 */ /*0090*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x178], !P0 ; /* 0x00005e0000007a0c */ /* 0x000fda0004706670 */ /*00a0*/ @P0 BRA 0xa70 ; /* 0x000009c000000947 */ /* 0x000fea0003800000 */ /*00b0*/ IADD3 R2, R6.reuse, -0x1, RZ ; /* 0xffffffff06027810 */ /* 0x040fe20007ffe0ff */ /*00c0*/ IMAD.MOV.U32 R15, RZ, RZ, RZ ; /* 0x000000ffff0f7224 */ /* 0x000fe200078e00ff */ /*00d0*/ LOP3.LUT R6, R6, 0x3, RZ, 0xc0, !PT ; /* 0x0000000306067812 */ /* 0x000fe400078ec0ff */ /*00e0*/ ISETP.GE.U32.AND P0, PT, R2, 0x3, PT ; /* 0x000000030200780c */ /* 0x000fe40003f06070 */ /*00f0*/ MOV R7, RZ ; /* 0x000000ff00077202 */ /* 0x000fd60000000f00 */ /*0100*/ @!P0 BRA 0x930 ; /* 0x0000082000008947 */ /* 0x000fea0003800000 */ /*0110*/ IADD3 R8, -R6, c[0x0][0x178], RZ ; /* 0x00005e0006087a10 */ /* 0x000fe20007ffe1ff */ /*0120*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */ /* 0x000fe200078e00ff */ /*0130*/ HFMA2.MMA R15, -RZ, RZ, 0, 0 ; /* 0x00000000ff0f7435 */ /* 0x000fe200000001ff */ /*0140*/ IMAD R2, R0, c[0x0][0x178], RZ ; /* 0x00005e0000027a24 */ /* 
0x000fe200078e02ff */ /*0150*/ ISETP.GT.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */ /* 0x000fe20003f04270 */ /*0160*/ IMAD.MOV.U32 R7, RZ, RZ, RZ ; /* 0x000000ffff077224 */ /* 0x000fe200078e00ff */ /*0170*/ MOV R4, c[0x0][0x160] ; /* 0x0000580000047a02 */ /* 0x000fe20000000f00 */ /*0180*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff057624 */ /* 0x000fe400078e00ff */ /*0190*/ IMAD.WIDE R2, R2, R3, c[0x0][0x168] ; /* 0x00005a0002027625 */ /* 0x000fd000078e0203 */ /*01a0*/ @!P0 BRA 0x7b0 ; /* 0x0000060000008947 */ /* 0x000fea0003800000 */ /*01b0*/ ISETP.GT.AND P1, PT, R8, 0xc, PT ; /* 0x0000000c0800780c */ /* 0x000fe40003f24270 */ /*01c0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */ /* 0x000fd60003f0f070 */ /*01d0*/ @!P1 BRA 0x570 ; /* 0x0000039000009947 */ /* 0x000fea0003800000 */ /*01e0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*01f0*/ LDG.E R16, [R4.64] ; /* 0x0000000404107981 */ /* 0x000ea8000c1e1900 */ /*0200*/ LDG.E R17, [R2.64] ; /* 0x0000000402117981 */ /* 0x000ea8000c1e1900 */ /*0210*/ LDG.E R18, [R4.64+0x4] ; /* 0x0000040404127981 */ /* 0x000ee8000c1e1900 */ /*0220*/ LDG.E R25, [R2.64+0x4] ; /* 0x0000040402197981 */ /* 0x000ee8000c1e1900 */ /*0230*/ LDG.E R19, [R4.64+0x8] ; /* 0x0000080404137981 */ /* 0x000f28000c1e1900 */ /*0240*/ LDG.E R20, [R2.64+0x8] ; /* 0x0000080402147981 */ /* 0x000f28000c1e1900 */ /*0250*/ LDG.E R22, [R4.64+0xc] ; /* 0x00000c0404167981 */ /* 0x000f68000c1e1900 */ /*0260*/ LDG.E R21, [R2.64+0xc] ; /* 0x00000c0402157981 */ /* 0x000f68000c1e1900 */ /*0270*/ LDG.E R23, [R4.64+0x10] ; /* 0x0000100404177981 */ /* 0x000f68000c1e1900 */ /*0280*/ LDG.E R24, [R2.64+0x10] ; /* 0x0000100402187981 */ /* 0x000f68000c1e1900 */ /*0290*/ LDG.E R9, [R4.64+0x14] ; /* 0x0000140404097981 */ /* 0x000f68000c1e1900 */ /*02a0*/ LDG.E R10, [R2.64+0x14] ; /* 0x00001404020a7981 */ /* 0x000f68000c1e1900 */ /*02b0*/ LDG.E R11, [R4.64+0x18] ; /* 0x00001804040b7981 */ /* 
0x000f68000c1e1900 */ /*02c0*/ LDG.E R12, [R2.64+0x18] ; /* 0x00001804020c7981 */ /* 0x000f68000c1e1900 */ /*02d0*/ LDG.E R13, [R4.64+0x1c] ; /* 0x00001c04040d7981 */ /* 0x000f68000c1e1900 */ /*02e0*/ LDG.E R14, [R2.64+0x1c] ; /* 0x00001c04020e7981 */ /* 0x000f62000c1e1900 */ /*02f0*/ FFMA R17, R16, R17, R15 ; /* 0x0000001110117223 */ /* 0x004fc6000000000f */ /*0300*/ LDG.E R15, [R4.64+0x20] ; /* 0x00002004040f7981 */ /* 0x0000a8000c1e1900 */ /*0310*/ LDG.E R16, [R2.64+0x20] ; /* 0x0000200402107981 */ /* 0x0002a2000c1e1900 */ /*0320*/ FFMA R25, R18, R25, R17 ; /* 0x0000001912197223 */ /* 0x008fc60000000011 */ /*0330*/ LDG.E R17, [R4.64+0x24] ; /* 0x0000240404117981 */ /* 0x0000e8000c1e1900 */ /*0340*/ LDG.E R18, [R2.64+0x24] ; /* 0x0000240402127981 */ /* 0x0002e2000c1e1900 */ /*0350*/ FFMA R25, R19, R20, R25 ; /* 0x0000001413197223 */ /* 0x010fc60000000019 */ /*0360*/ LDG.E R19, [R4.64+0x28] ; /* 0x0000280404137981 */ /* 0x000128000c1e1900 */ /*0370*/ LDG.E R20, [R2.64+0x28] ; /* 0x0000280402147981 */ /* 0x000322000c1e1900 */ /*0380*/ FFMA R25, R22, R21, R25 ; /* 0x0000001516197223 */ /* 0x020fc60000000019 */ /*0390*/ LDG.E R21, [R4.64+0x2c] ; /* 0x00002c0404157981 */ /* 0x000168000c1e1900 */ /*03a0*/ LDG.E R22, [R2.64+0x2c] ; /* 0x00002c0402167981 */ /* 0x000362000c1e1900 */ /*03b0*/ FFMA R25, R23, R24, R25 ; /* 0x0000001817197223 */ /* 0x000fc60000000019 */ /*03c0*/ LDG.E R23, [R4.64+0x30] ; /* 0x0000300404177981 */ /* 0x000168000c1e1900 */ /*03d0*/ LDG.E R24, [R2.64+0x30] ; /* 0x0000300402187981 */ /* 0x000362000c1e1900 */ /*03e0*/ FFMA R25, R9, R10, R25 ; /* 0x0000000a09197223 */ /* 0x000fc60000000019 */ /*03f0*/ LDG.E R9, [R4.64+0x34] ; /* 0x0000340404097981 */ /* 0x000168000c1e1900 */ /*0400*/ LDG.E R10, [R2.64+0x34] ; /* 0x00003404020a7981 */ /* 0x000362000c1e1900 */ /*0410*/ FFMA R25, R11, R12, R25 ; /* 0x0000000c0b197223 */ /* 0x000fc60000000019 */ /*0420*/ LDG.E R11, [R4.64+0x38] ; /* 0x00003804040b7981 */ /* 0x000168000c1e1900 */ /*0430*/ LDG.E R12, 
[R2.64+0x38] ; /* 0x00003804020c7981 */ /* 0x000362000c1e1900 */ /*0440*/ FFMA R25, R13, R14, R25 ; /* 0x0000000e0d197223 */ /* 0x000fc60000000019 */ /*0450*/ LDG.E R13, [R4.64+0x3c] ; /* 0x00003c04040d7981 */ /* 0x000168000c1e1900 */ /*0460*/ LDG.E R14, [R2.64+0x3c] ; /* 0x00003c04020e7981 */ /* 0x000362000c1e1900 */ /*0470*/ IADD3 R8, R8, -0x10, RZ ; /* 0xfffffff008087810 */ /* 0x000fe40007ffe0ff */ /*0480*/ IADD3 R7, R7, 0x10, RZ ; /* 0x0000001007077810 */ /* 0x000fe40007ffe0ff */ /*0490*/ ISETP.GT.AND P1, PT, R8, 0xc, PT ; /* 0x0000000c0800780c */ /* 0x000fe40003f24270 */ /*04a0*/ IADD3 R4, P3, R4, 0x40, RZ ; /* 0x0000004004047810 */ /* 0x001fc40007f7e0ff */ /*04b0*/ IADD3 R2, P2, R2, 0x40, RZ ; /* 0x0000004002027810 */ /* 0x002fe40007f5e0ff */ /*04c0*/ IADD3.X R5, RZ, R5, RZ, P3, !PT ; /* 0x00000005ff057210 */ /* 0x000fc60001ffe4ff */ /*04d0*/ IMAD.X R3, RZ, RZ, R3, P2 ; /* 0x000000ffff037224 */ /* 0x000fe400010e0603 */ /*04e0*/ FFMA R15, R15, R16, R25 ; /* 0x000000100f0f7223 */ /* 0x004fc80000000019 */ /*04f0*/ FFMA R15, R17, R18, R15 ; /* 0x00000012110f7223 */ /* 0x008fc8000000000f */ /*0500*/ FFMA R15, R19, R20, R15 ; /* 0x00000014130f7223 */ /* 0x010fc8000000000f */ /*0510*/ FFMA R15, R21, R22, R15 ; /* 0x00000016150f7223 */ /* 0x020fc8000000000f */ /*0520*/ FFMA R15, R23, R24, R15 ; /* 0x00000018170f7223 */ /* 0x000fc8000000000f */ /*0530*/ FFMA R9, R9, R10, R15 ; /* 0x0000000a09097223 */ /* 0x000fc8000000000f */ /*0540*/ FFMA R9, R11, R12, R9 ; /* 0x0000000c0b097223 */ /* 0x000fc80000000009 */ /*0550*/ FFMA R15, R13, R14, R9 ; /* 0x0000000e0d0f7223 */ /* 0x000fe20000000009 */ /*0560*/ @P1 BRA 0x1f0 ; /* 0xfffffc8000001947 */ /* 0x000fea000383ffff */ /*0570*/ ISETP.GT.AND P1, PT, R8, 0x4, PT ; /* 0x000000040800780c */ /* 0x000fda0003f24270 */ /*0580*/ @!P1 BRA 0x790 ; /* 0x0000020000009947 */ /* 0x000fea0003800000 */ /*0590*/ LDG.E R16, [R4.64] ; /* 0x0000000404107981 */ /* 0x0000a8000c1e1900 */ /*05a0*/ LDG.E R17, [R2.64] ; /* 0x0000000402117981 */ /* 
0x000ea8000c1e1900 */ /*05b0*/ LDG.E R19, [R4.64+0x4] ; /* 0x0000040404137981 */ /* 0x0000e8000c1e1900 */ /*05c0*/ LDG.E R18, [R2.64+0x4] ; /* 0x0000040402127981 */ /* 0x000ee8000c1e1900 */ /*05d0*/ LDG.E R21, [R4.64+0x8] ; /* 0x0000080404157981 */ /* 0x000128000c1e1900 */ /*05e0*/ LDG.E R20, [R2.64+0x8] ; /* 0x0000080402147981 */ /* 0x000f28000c1e1900 */ /*05f0*/ LDG.E R23, [R4.64+0xc] ; /* 0x00000c0404177981 */ /* 0x000168000c1e1900 */ /*0600*/ LDG.E R22, [R2.64+0xc] ; /* 0x00000c0402167981 */ /* 0x000f68000c1e1900 */ /*0610*/ LDG.E R25, [R4.64+0x10] ; /* 0x0000100404197981 */ /* 0x000168000c1e1900 */ /*0620*/ LDG.E R24, [R2.64+0x10] ; /* 0x0000100402187981 */ /* 0x000f68000c1e1900 */ /*0630*/ LDG.E R13, [R4.64+0x14] ; /* 0x00001404040d7981 */ /* 0x000168000c1e1900 */ /*0640*/ LDG.E R14, [R2.64+0x14] ; /* 0x00001404020e7981 */ /* 0x000f68000c1e1900 */ /*0650*/ LDG.E R11, [R4.64+0x18] ; /* 0x00001804040b7981 */ /* 0x000168000c1e1900 */ /*0660*/ LDG.E R12, [R2.64+0x18] ; /* 0x00001804020c7981 */ /* 0x000f68000c1e1900 */ /*0670*/ LDG.E R9, [R4.64+0x1c] ; /* 0x00001c0404097981 */ /* 0x000168000c1e1900 */ /*0680*/ LDG.E R10, [R2.64+0x1c] ; /* 0x00001c04020a7981 */ /* 0x000362000c1e1900 */ /*0690*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fc40003f0e170 */ /*06a0*/ IADD3 R7, R7, 0x8, RZ ; /* 0x0000000807077810 */ /* 0x000fe40007ffe0ff */ /*06b0*/ IADD3 R8, R8, -0x8, RZ ; /* 0xfffffff808087810 */ /* 0x000fe40007ffe0ff */ /*06c0*/ IADD3 R4, P2, R4, 0x20, RZ ; /* 0x0000002004047810 */ /* 0x001fca0007f5e0ff */ /*06d0*/ IMAD.X R5, RZ, RZ, R5, P2 ; /* 0x000000ffff057224 */ /* 0x000fe400010e0605 */ /*06e0*/ FFMA R16, R16, R17, R15 ; /* 0x0000001110107223 */ /* 0x004fc8000000000f */ /*06f0*/ FFMA R16, R19, R18, R16 ; /* 0x0000001213107223 */ /* 0x008fc80000000010 */ /*0700*/ FFMA R16, R21, R20, R16 ; /* 0x0000001415107223 */ /* 0x010fc80000000010 */ /*0710*/ FFMA R16, R23, R22, R16 ; /* 0x0000001617107223 */ /* 0x020fc80000000010 */ /*0720*/ 
FFMA R16, R25, R24, R16 ; /* 0x0000001819107223 */ /* 0x000fc80000000010 */ /*0730*/ FFMA R13, R13, R14, R16 ; /* 0x0000000e0d0d7223 */ /* 0x000fc80000000010 */ /*0740*/ FFMA R11, R11, R12, R13 ; /* 0x0000000c0b0b7223 */ /* 0x000fe2000000000d */ /*0750*/ IADD3 R12, P1, R2, 0x20, RZ ; /* 0x00000020020c7810 */ /* 0x000fc80007f3e0ff */ /*0760*/ IADD3.X R3, RZ, R3, RZ, P1, !PT ; /* 0x00000003ff037210 */ /* 0x002fe40000ffe4ff */ /*0770*/ MOV R2, R12 ; /* 0x0000000c00027202 */ /* 0x000fe20000000f00 */ /*0780*/ FFMA R15, R9, R10, R11 ; /* 0x0000000a090f7223 */ /* 0x000fe4000000000b */ /*0790*/ ISETP.NE.OR P0, PT, R8, RZ, P0 ; /* 0x000000ff0800720c */ /* 0x000fda0000705670 */ /*07a0*/ @!P0 BRA 0x930 ; /* 0x0000018000008947 */ /* 0x000fea0003800000 */ /*07b0*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000ea8000c1e1900 */ /*07c0*/ LDG.E R9, [R2.64] ; /* 0x0000000402097981 */ /* 0x000ea8000c1e1900 */ /*07d0*/ LDG.E R12, [R4.64+0x4] ; /* 0x00000404040c7981 */ /* 0x0000e8000c1e1900 */ /*07e0*/ LDG.E R11, [R2.64+0x4] ; /* 0x00000404020b7981 */ /* 0x000ee8000c1e1900 */ /*07f0*/ LDG.E R14, [R4.64+0x8] ; /* 0x00000804040e7981 */ /* 0x000128000c1e1900 */ /*0800*/ LDG.E R13, [R2.64+0x8] ; /* 0x00000804020d7981 */ /* 0x000328000c1e1900 */ /*0810*/ LDG.E R16, [R4.64+0xc] ; /* 0x00000c0404107981 */ /* 0x000168000c1e1900 */ /*0820*/ LDG.E R17, [R2.64+0xc] ; /* 0x00000c0402117981 */ /* 0x000362000c1e1900 */ /*0830*/ IADD3 R8, R8, -0x4, RZ ; /* 0xfffffffc08087810 */ /* 0x000fc40007ffe0ff */ /*0840*/ IADD3 R7, R7, 0x4, RZ ; /* 0x0000000407077810 */ /* 0x000fe40007ffe0ff */ /*0850*/ ISETP.NE.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */ /* 0x000fe20003f05270 */ /*0860*/ FFMA R9, R10, R9, R15 ; /* 0x000000090a097223 */ /* 0x004fe2000000000f */ /*0870*/ IADD3 R10, P2, R4, 0x10, RZ ; /* 0x00000010040a7810 */ /* 0x000fca0007f5e0ff */ /*0880*/ IMAD.MOV.U32 R4, RZ, RZ, R10 ; /* 0x000000ffff047224 */ /* 0x001fe400078e000a */ /*0890*/ FFMA R9, R12, R11, R9 ; /* 0x0000000b0c097223 */ /* 
0x008fe20000000009 */ /*08a0*/ IADD3 R12, P1, R2, 0x10, RZ ; /* 0x00000010020c7810 */ /* 0x000fe20007f3e0ff */ /*08b0*/ IMAD.X R11, RZ, RZ, R5, P2 ; /* 0x000000ffff0b7224 */ /* 0x000fc600010e0605 */ /*08c0*/ MOV R2, R12 ; /* 0x0000000c00027202 */ /* 0x002fe40000000f00 */ /*08d0*/ MOV R5, R11 ; /* 0x0000000b00057202 */ /* 0x000fe20000000f00 */ /*08e0*/ FFMA R9, R14, R13, R9 ; /* 0x0000000d0e097223 */ /* 0x010fe20000000009 */ /*08f0*/ IADD3.X R13, RZ, R3, RZ, P1, !PT ; /* 0x00000003ff0d7210 */ /* 0x000fca0000ffe4ff */ /*0900*/ IMAD.MOV.U32 R3, RZ, RZ, R13 ; /* 0x000000ffff037224 */ /* 0x000fe400078e000d */ /*0910*/ FFMA R15, R16, R17, R9 ; /* 0x00000011100f7223 */ /* 0x020fe20000000009 */ /*0920*/ @P0 BRA 0x7b0 ; /* 0xfffffe8000000947 */ /* 0x000fea000383ffff */ /*0930*/ ISETP.NE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */ /* 0x000fda0003f05270 */ /*0940*/ @!P0 BRA 0xa70 ; /* 0x0000012000008947 */ /* 0x000fea0003800000 */ /*0950*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */ /* 0x000fe200000001ff */ /*0960*/ IMAD R4, R0, c[0x0][0x178], R7 ; /* 0x00005e0000047a24 */ /* 0x000fd200078e0207 */ /*0970*/ IMAD.WIDE R2, R7, R5, c[0x0][0x160] ; /* 0x0000580007027625 */ /* 0x000fc800078e0205 */ /*0980*/ IMAD.WIDE R4, R4, R5, c[0x0][0x168] ; /* 0x00005a0004047625 */ /* 0x000fe200078e0205 */ /*0990*/ MOV R8, R2 ; /* 0x0000000200087202 */ /* 0x000fc60000000f00 */ /*09a0*/ IMAD.MOV.U32 R7, RZ, RZ, R4 ; /* 0x000000ffff077224 */ /* 0x000fc600078e0004 */ /*09b0*/ MOV R2, R8 ; /* 0x0000000800027202 */ /* 0x000fe40000000f00 */ /*09c0*/ MOV R4, R7 ; /* 0x0000000700047202 */ /* 0x000fc80000000f00 */ /*09d0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x0000a8000c1e1900 */ /*09e0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x0002a2000c1e1900 */ /*09f0*/ IADD3 R6, R6, -0x1, RZ ; /* 0xffffffff06067810 */ /* 0x000fe40007ffe0ff */ /*0a00*/ IADD3 R8, P1, R8, 0x4, RZ ; /* 0x0000000408087810 */ /* 0x000fe40007f3e0ff */ /*0a10*/ ISETP.NE.AND P0, PT, 
R6, RZ, PT ; /* 0x000000ff0600720c */ /* 0x000fe40003f05270 */ /*0a20*/ IADD3 R7, P2, R7, 0x4, RZ ; /* 0x0000000407077810 */ /* 0x000fe20007f5e0ff */ /*0a30*/ IMAD.X R3, RZ, RZ, R3, P1 ; /* 0x000000ffff037224 */ /* 0x001fc600008e0603 */ /*0a40*/ IADD3.X R5, RZ, R5, RZ, P2, !PT ; /* 0x00000005ff057210 */ /* 0x002fe200017fe4ff */ /*0a50*/ FFMA R15, R2, R4, R15 ; /* 0x00000004020f7223 */ /* 0x004fcc000000000f */ /*0a60*/ @P0 BRA 0x9b0 ; /* 0xffffff4000000947 */ /* 0x000fea000383ffff */ /*0a70*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0a80*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */ /* 0x000fd400000001ff */ /*0a90*/ IMAD.WIDE R2, R0, R3, c[0x0][0x170] ; /* 0x00005c0000027625 */ /* 0x000fca00078e0203 */ /*0aa0*/ STG.E [R2.64], R15 ; /* 0x0000000f02007986 */ /* 0x000fe2000c101904 */ /*0ab0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0ac0*/ BRA 0xac0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0ad0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0ae0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0af0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0b00*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0b10*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0b20*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0b30*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0b40*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0b50*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0b60*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0b70*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <time.h>
using namespace std;

// Computes one row of c = mat * a per thread.
// mat is an n x n row-major matrix; a and c are length-n device vectors.
// Launch with at least n total threads along x.
__global__ void matrixVectorMultiplication(float *a, float *mat, float *c, int n)
{
    int row = threadIdx.x + blockDim.x * blockIdx.x;
    float sum = 0.0f;
    if (row < n) {
        for (int j = 0; j < n; j++) {
            sum = sum + mat[row * n + j] * a[j];
        }
        // BUG FIX: the original stored c[row] outside the bounds check,
        // an out-of-bounds write for every thread with row >= n.
        c[row] = sum;
    }
}

int main()
{
    float *a, *b, *c, *d;
    float *dev_a, *dev_b, *dev_c;
    int n = 32 * 1024;

    a = (float*)malloc(sizeof(float) * n);
    b = (float*)malloc(sizeof(float) * n * n);   // sizeof() makes this size_t math; no int overflow
    c = (float*)malloc(sizeof(float) * n);
    d = (float*)malloc(sizeof(float) * n);
    if (!a || !b || !c || !d) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }

    int i, j;
    // BUG FIX: the original loop body was unbraced
    // ("for(i=0;i<n;i++) a[i]=1.0; c[i]=1.0;"), so c[i]=1.0 ran once AFTER
    // the loop with i == n -- an out-of-bounds write to c[n].
    for (i = 0; i < n; i++) {
        a[i] = 1.0f;
        c[i] = 1.0f;
    }
    for (i = 0; i < n; i++)
        for (j = 0; j < n; j++)
            b[i * n + j] = 2.0f;

    printf("<<<<<<<<<< initial data:\n");

    cudaMalloc((void**)&dev_a, sizeof(float) * n);
    cudaMalloc((void**)&dev_b, sizeof(float) * n * n);
    cudaMalloc((void**)&dev_c, sizeof(float) * n);

    cudaMemcpy(dev_a, a, sizeof(float) * n, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, sizeof(float) * n * n, cudaMemcpyHostToDevice);

    cudaEvent_t start, end;
    cudaEventCreate(&start);
    cudaEventCreate(&end);

    int threadsPerBlock = 32;
    int blocksPerGrid = n / threadsPerBlock;   // exact: n is a multiple of 32

    cudaEventRecord(start);
    matrixVectorMultiplication<<<blocksPerGrid, threadsPerBlock>>>(dev_a, dev_b, dev_c, n);
    cudaEventRecord(end);
    cudaEventSynchronize(end);

    float time = 0.0f;
    cudaEventElapsedTime(&time, start, end);

    cudaMemcpy(c, dev_c, sizeof(float) * n, cudaMemcpyDeviceToHost);
    cout << "\nGPU Time Elapsed: " << time;

    // CPU reference: d = b * a.
    // BUG FIX: the original computed a[row*n+col]*b[col], treating the
    // length-n vector 'a' as the matrix (reading far past its allocation);
    // the kernel multiplies mat (=b) by the vector (=a). Accumulate in
    // float instead of the original int.
    for (int row = 0; row < n; row++) {
        float ref = 0.0f;
        for (int col = 0; col < n; col++) {
            ref += b[row * n + col] * a[col];
        }
        d[row] = ref;
    }

    // Compare with absolute differences so positive/negative errors
    // cannot cancel (the original summed signed diffs into an int).
    float error = 0.0f;
    for (int k = 0; k < n; k++) {
        error += fabsf(d[k] - c[k]);
        // cout << " gpu " << c[k] << " CPU " << d[k] << endl;
    }
    cout << "Error : " << error;

    // Release everything the original leaked.
    cudaEventDestroy(start);
    cudaEventDestroy(end);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    free(a); free(b); free(c); free(d);
    return 0;
}

// Fill a length-N array with 1.0f.
void init_array(float *a, const int N)
{
    for (int i = 0; i < N; i++)
        a[i] = 1.0f;
}

// Fill an N x M row-major matrix with 2.0f.
void init_mat(float *a, const int N, const int M)
{
    int i, j;
    for (i = 0; i < N; i++)
        for (j = 0; j < M; j++)
            a[i * M + j] = 2.0f;
}
.file "tmpxft_00188c7b_00000000-6_matvec_cuda1.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB3674: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3674: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z10init_arrayPfi .type _Z10init_arrayPfi, @function _Z10init_arrayPfi: .LFB3670: .cfi_startproc endbr64 testl %esi, %esi jle .L3 movq %rdi, %rax movslq %esi, %rsi leaq (%rdi,%rsi,4), %rdx movss .LC0(%rip), %xmm0 .L5: movss %xmm0, (%rax) addq $4, %rax cmpq %rdx, %rax jne .L5 .L3: ret .cfi_endproc .LFE3670: .size _Z10init_arrayPfi, .-_Z10init_arrayPfi .globl _Z8init_matPfii .type _Z8init_matPfii, @function _Z8init_matPfii: .LFB3671: .cfi_startproc endbr64 testl %esi, %esi jle .L7 movl $0, %r9d movl $0, %r8d movslq %edx, %r10 movss .LC1(%rip), %xmm0 jmp .L9 .L11: movslq %r9d, %rcx leaq (%rdi,%rcx,4), %rax addq %r10, %rcx leaq (%rdi,%rcx,4), %rcx .L10: movss %xmm0, (%rax) addq $4, %rax cmpq %rcx, %rax jne .L10 .L12: addl $1, %r8d addl %edx, %r9d cmpl %r8d, %esi je .L7 .L9: testl %edx, %edx jg .L11 jmp .L12 .L7: ret .cfi_endproc .LFE3671: .size _Z8init_matPfii, .-_Z8init_matPfii .globl _Z51__device_stub__Z26matrixVectorMultiplicationPfS_S_iPfS_S_i .type _Z51__device_stub__Z26matrixVectorMultiplicationPfS_S_iPfS_S_i, @function _Z51__device_stub__Z26matrixVectorMultiplicationPfS_S_iPfS_S_i: .LFB3696: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) 
movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L18 .L14: movq 136(%rsp), %rax subq %fs:40, %rax jne .L19 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L18: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z26matrixVectorMultiplicationPfS_S_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L14 .L19: call __stack_chk_fail@PLT .cfi_endproc .LFE3696: .size _Z51__device_stub__Z26matrixVectorMultiplicationPfS_S_iPfS_S_i, .-_Z51__device_stub__Z26matrixVectorMultiplicationPfS_S_iPfS_S_i .globl _Z26matrixVectorMultiplicationPfS_S_i .type _Z26matrixVectorMultiplicationPfS_S_i, @function _Z26matrixVectorMultiplicationPfS_S_i: .LFB3697: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z51__device_stub__Z26matrixVectorMultiplicationPfS_S_iPfS_S_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3697: .size _Z26matrixVectorMultiplicationPfS_S_i, .-_Z26matrixVectorMultiplicationPfS_S_i .section .rodata.str1.1,"aMS",@progbits,1 .LC2: .string "<<<<<<<<<< initial data:\n" .LC4: .string "\nGPU Time Elapsed: " .LC5: .string "Error : " .text .globl main .type main, @function main: .LFB3669: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $88, %rsp .cfi_def_cfa_offset 144 movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax movl $131072, %edi call malloc@PLT movq %rax, %r14 movabsq $4294967296, %rdi call 
malloc@PLT movq %rax, %rbp movl $131072, %edi call malloc@PLT movq %rax, %r13 movl $131072, %edi call malloc@PLT movq %rax, %r12 movq %r14, %rbx leaq 131072(%r14), %rdx movq %r14, %rax movss .LC0(%rip), %xmm0 .L23: movss %xmm0, (%rax) addq $4, %rax cmpq %rdx, %rax jne .L23 movl $0x3f800000, 131072(%r13) leaq 131072(%rbp), %rdx movabsq $4295098368, %rcx addq %rbp, %rcx movss .LC1(%rip), %xmm0 .L24: leaq -131072(%rdx), %rax .L25: movss %xmm0, (%rax) addq $4, %rax cmpq %rdx, %rax jne .L25 addq $131072, %rdx cmpq %rcx, %rdx jne .L24 leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq 8(%rsp), %rdi movl $131072, %esi call cudaMalloc@PLT leaq 16(%rsp), %rdi movabsq $4294967296, %r15 movq %r15, %rsi call cudaMalloc@PLT leaq 24(%rsp), %rdi movl $131072, %esi call cudaMalloc@PLT movl $1, %ecx movl $131072, %edx movq %r14, %rsi movq 8(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movq %r15, %rdx movq %rbp, %rsi movq 16(%rsp), %rdi call cudaMemcpy@PLT leaq 32(%rsp), %rdi call cudaEventCreate@PLT leaq 40(%rsp), %rdi call cudaEventCreate@PLT movl $0, %esi movq 32(%rsp), %rdi call cudaEventRecord@PLT movl $32, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1024, 48(%rsp) movl $1, 52(%rsp) movl $0, %r9d movl $0, %r8d movq 60(%rsp), %rdx movl $1, %ecx movq 48(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L38 .L27: movl $0, %esi movq 40(%rsp), %rdi call cudaEventRecord@PLT movq 40(%rsp), %rdi call cudaEventSynchronize@PLT movl $0x00000000, 60(%rsp) leaq 60(%rsp), %rdi movq 40(%rsp), %rdx movq 32(%rsp), %rsi call cudaEventElapsedTime@PLT movl $2, %ecx movl $131072, %edx movq 24(%rsp), %rsi movq %r13, %rdi call cudaMemcpy@PLT leaq .LC4(%rip), %rsi leaq _ZSt4cout(%rip), %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi pxor %xmm0, %xmm0 cvtss2sd 60(%rsp), %xmm0 call _ZNSo9_M_insertIdEERSoT_@PLT movl $0, %ecx movl $0, %esi .L28: movl $0, %eax movl %esi, %edx .L29: movss (%rbx,%rax), 
%xmm0 mulss 0(%rbp,%rax), %xmm0 pxor %xmm1, %xmm1 cvtsi2ssl %edx, %xmm1 addss %xmm1, %xmm0 cvttss2sil %xmm0, %edx addq $4, %rax cmpq $131072, %rax jne .L29 pxor %xmm0, %xmm0 cvtsi2ssl %edx, %xmm0 movss %xmm0, (%r12,%rcx,4) addq $1, %rcx addq $131072, %rbx cmpq $32768, %rcx jne .L28 movl $0, %eax movl $0, %ebx .L30: movss (%r12,%rax), %xmm0 subss 0(%r13,%rax), %xmm0 pxor %xmm1, %xmm1 cvtsi2ssl %ebx, %xmm1 addss %xmm1, %xmm0 cvttss2sil %xmm0, %ebx addq $4, %rax cmpq $131072, %rax jne .L30 leaq .LC5(%rip), %rsi leaq _ZSt4cout(%rip), %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi movl %ebx, %esi call _ZNSolsEi@PLT movq 72(%rsp), %rax subq %fs:40, %rax jne .L39 movl $0, %eax addq $88, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L38: .cfi_restore_state movl $32768, %ecx movq 24(%rsp), %rdx movq 16(%rsp), %rsi movq 8(%rsp), %rdi call _Z51__device_stub__Z26matrixVectorMultiplicationPfS_S_iPfS_S_i jmp .L27 .L39: call __stack_chk_fail@PLT .cfi_endproc .LFE3669: .size main, .-main .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC6: .string "_Z26matrixVectorMultiplicationPfS_S_i" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB3699: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC6(%rip), %rdx movq %rdx, %rcx leaq _Z26matrixVectorMultiplicationPfS_S_i(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call 
__cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3699: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst4,"aM",@progbits,4 .align 4 .LC0: .long 1065353216 .align 4 .LC1: .long 1073741824 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <time.h>
using namespace std;

// Computes one row of c = mat * a per thread.
// mat is an n x n row-major matrix; a and c are length-n device vectors.
// Launch with at least n total threads along x.
__global__ void matrixVectorMultiplication(float *a, float *mat, float *c, int n)
{
    int row = threadIdx.x + blockDim.x * blockIdx.x;
    float sum = 0.0f;
    if (row < n) {
        for (int j = 0; j < n; j++) {
            sum = sum + mat[row * n + j] * a[j];
        }
        // BUG FIX: the original stored c[row] outside the bounds check,
        // an out-of-bounds write for every thread with row >= n.
        c[row] = sum;
    }
}

int main()
{
    float *a, *b, *c, *d;
    float *dev_a, *dev_b, *dev_c;
    int n = 32 * 1024;

    a = (float*)malloc(sizeof(float) * n);
    b = (float*)malloc(sizeof(float) * n * n);   // sizeof() makes this size_t math; no int overflow
    c = (float*)malloc(sizeof(float) * n);
    d = (float*)malloc(sizeof(float) * n);
    if (!a || !b || !c || !d) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }

    int i, j;
    // BUG FIX: the original loop body was unbraced
    // ("for(i=0;i<n;i++) a[i]=1.0; c[i]=1.0;"), so c[i]=1.0 ran once AFTER
    // the loop with i == n -- an out-of-bounds write to c[n].
    for (i = 0; i < n; i++) {
        a[i] = 1.0f;
        c[i] = 1.0f;
    }
    for (i = 0; i < n; i++)
        for (j = 0; j < n; j++)
            b[i * n + j] = 2.0f;

    printf("<<<<<<<<<< initial data:\n");

    cudaMalloc((void**)&dev_a, sizeof(float) * n);
    cudaMalloc((void**)&dev_b, sizeof(float) * n * n);
    cudaMalloc((void**)&dev_c, sizeof(float) * n);

    cudaMemcpy(dev_a, a, sizeof(float) * n, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, sizeof(float) * n * n, cudaMemcpyHostToDevice);

    cudaEvent_t start, end;
    cudaEventCreate(&start);
    cudaEventCreate(&end);

    int threadsPerBlock = 32;
    int blocksPerGrid = n / threadsPerBlock;   // exact: n is a multiple of 32

    cudaEventRecord(start);
    matrixVectorMultiplication<<<blocksPerGrid, threadsPerBlock>>>(dev_a, dev_b, dev_c, n);
    cudaEventRecord(end);
    cudaEventSynchronize(end);

    float time = 0.0f;
    cudaEventElapsedTime(&time, start, end);

    cudaMemcpy(c, dev_c, sizeof(float) * n, cudaMemcpyDeviceToHost);
    cout << "\nGPU Time Elapsed: " << time;

    // CPU reference: d = b * a.
    // BUG FIX: the original computed a[row*n+col]*b[col], treating the
    // length-n vector 'a' as the matrix (reading far past its allocation);
    // the kernel multiplies mat (=b) by the vector (=a). Accumulate in
    // float instead of the original int.
    for (int row = 0; row < n; row++) {
        float ref = 0.0f;
        for (int col = 0; col < n; col++) {
            ref += b[row * n + col] * a[col];
        }
        d[row] = ref;
    }

    // Compare with absolute differences so positive/negative errors
    // cannot cancel (the original summed signed diffs into an int).
    float error = 0.0f;
    for (int k = 0; k < n; k++) {
        error += fabsf(d[k] - c[k]);
        // cout << " gpu " << c[k] << " CPU " << d[k] << endl;
    }
    cout << "Error : " << error;

    // Release everything the original leaked.
    cudaEventDestroy(start);
    cudaEventDestroy(end);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    free(a); free(b); free(c); free(d);
    return 0;
}

// Fill a length-N array with 1.0f.
void init_array(float *a, const int N)
{
    for (int i = 0; i < N; i++)
        a[i] = 1.0f;
}

// Fill an N x M row-major matrix with 2.0f.
void init_mat(float *a, const int N, const int M)
{
    int i, j;
    for (i = 0; i < N; i++)
        for (j = 0; j < M; j++)
            a[i * M + j] = 2.0f;
}
#include <hip/hip_runtime.h>
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <time.h>
using namespace std;

// Computes one row of c = mat * a per thread.
// mat is an n x n row-major matrix; a and c are length-n device vectors.
// Launch with at least n total threads along x.
__global__ void matrixVectorMultiplication(float *a, float *mat, float *c, int n)
{
    int row = threadIdx.x + blockDim.x * blockIdx.x;
    float sum = 0.0f;
    if (row < n) {
        for (int j = 0; j < n; j++) {
            sum = sum + mat[row * n + j] * a[j];
        }
        // BUG FIX: the original stored c[row] outside the bounds check,
        // an out-of-bounds write for every thread with row >= n.
        c[row] = sum;
    }
}

int main()
{
    float *a, *b, *c, *d;
    float *dev_a, *dev_b, *dev_c;
    int n = 32 * 1024;

    a = (float*)malloc(sizeof(float) * n);
    b = (float*)malloc(sizeof(float) * n * n);   // sizeof() makes this size_t math; no int overflow
    c = (float*)malloc(sizeof(float) * n);
    d = (float*)malloc(sizeof(float) * n);
    if (!a || !b || !c || !d) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }

    int i, j;
    // BUG FIX: the original loop body was unbraced
    // ("for(i=0;i<n;i++) a[i]=1.0; c[i]=1.0;"), so c[i]=1.0 ran once AFTER
    // the loop with i == n -- an out-of-bounds write to c[n].
    for (i = 0; i < n; i++) {
        a[i] = 1.0f;
        c[i] = 1.0f;
    }
    for (i = 0; i < n; i++)
        for (j = 0; j < n; j++)
            b[i * n + j] = 2.0f;

    printf("<<<<<<<<<< initial data:\n");

    hipMalloc((void**)&dev_a, sizeof(float) * n);
    hipMalloc((void**)&dev_b, sizeof(float) * n * n);
    hipMalloc((void**)&dev_c, sizeof(float) * n);

    hipMemcpy(dev_a, a, sizeof(float) * n, hipMemcpyHostToDevice);
    hipMemcpy(dev_b, b, sizeof(float) * n * n, hipMemcpyHostToDevice);

    hipEvent_t start, end;
    hipEventCreate(&start);
    hipEventCreate(&end);

    int threadsPerBlock = 32;
    int blocksPerGrid = n / threadsPerBlock;   // exact: n is a multiple of 32

    hipEventRecord(start);
    matrixVectorMultiplication<<<blocksPerGrid, threadsPerBlock>>>(dev_a, dev_b, dev_c, n);
    hipEventRecord(end);
    hipEventSynchronize(end);

    float time = 0.0f;
    hipEventElapsedTime(&time, start, end);

    hipMemcpy(c, dev_c, sizeof(float) * n, hipMemcpyDeviceToHost);
    cout << "\nGPU Time Elapsed: " << time;

    // CPU reference: d = b * a.
    // BUG FIX: the original computed a[row*n+col]*b[col], treating the
    // length-n vector 'a' as the matrix (reading far past its allocation);
    // the kernel multiplies mat (=b) by the vector (=a). Accumulate in
    // float instead of the original int.
    for (int row = 0; row < n; row++) {
        float ref = 0.0f;
        for (int col = 0; col < n; col++) {
            ref += b[row * n + col] * a[col];
        }
        d[row] = ref;
    }

    // Compare with absolute differences so positive/negative errors
    // cannot cancel (the original summed signed diffs into an int).
    float error = 0.0f;
    for (int k = 0; k < n; k++) {
        error += fabsf(d[k] - c[k]);
        // cout << " gpu " << c[k] << " CPU " << d[k] << endl;
    }
    cout << "Error : " << error;

    // Release everything the original leaked.
    hipEventDestroy(start);
    hipEventDestroy(end);
    hipFree(dev_a);
    hipFree(dev_b);
    hipFree(dev_c);
    free(a); free(b); free(c); free(d);
    return 0;
}

// Fill a length-N array with 1.0f.
void init_array(float *a, const int N)
{
    for (int i = 0; i < N; i++)
        a[i] = 1.0f;
}

// Fill an N x M row-major matrix with 2.0f.
void init_mat(float *a, const int N, const int M)
{
    int i, j;
    for (i = 0; i < N; i++)
        for (j = 0; j < M; j++)
            a[i * M + j] = 2.0f;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h>
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <time.h>
using namespace std;

// Computes one row of c = mat * a per thread.
// mat is an n x n row-major matrix; a and c are length-n device vectors.
// Launch with at least n total threads along x.
__global__ void matrixVectorMultiplication(float *a, float *mat, float *c, int n)
{
    int row = threadIdx.x + blockDim.x * blockIdx.x;
    float sum = 0.0f;
    if (row < n) {
        for (int j = 0; j < n; j++) {
            sum = sum + mat[row * n + j] * a[j];
        }
        // BUG FIX: the original stored c[row] outside the bounds check,
        // an out-of-bounds write for every thread with row >= n.
        c[row] = sum;
    }
}

int main()
{
    float *a, *b, *c, *d;
    float *dev_a, *dev_b, *dev_c;
    int n = 32 * 1024;

    a = (float*)malloc(sizeof(float) * n);
    b = (float*)malloc(sizeof(float) * n * n);   // sizeof() makes this size_t math; no int overflow
    c = (float*)malloc(sizeof(float) * n);
    d = (float*)malloc(sizeof(float) * n);
    if (!a || !b || !c || !d) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }

    int i, j;
    // BUG FIX: the original loop body was unbraced
    // ("for(i=0;i<n;i++) a[i]=1.0; c[i]=1.0;"), so c[i]=1.0 ran once AFTER
    // the loop with i == n -- an out-of-bounds write to c[n].
    for (i = 0; i < n; i++) {
        a[i] = 1.0f;
        c[i] = 1.0f;
    }
    for (i = 0; i < n; i++)
        for (j = 0; j < n; j++)
            b[i * n + j] = 2.0f;

    printf("<<<<<<<<<< initial data:\n");

    hipMalloc((void**)&dev_a, sizeof(float) * n);
    hipMalloc((void**)&dev_b, sizeof(float) * n * n);
    hipMalloc((void**)&dev_c, sizeof(float) * n);

    hipMemcpy(dev_a, a, sizeof(float) * n, hipMemcpyHostToDevice);
    hipMemcpy(dev_b, b, sizeof(float) * n * n, hipMemcpyHostToDevice);

    hipEvent_t start, end;
    hipEventCreate(&start);
    hipEventCreate(&end);

    int threadsPerBlock = 32;
    int blocksPerGrid = n / threadsPerBlock;   // exact: n is a multiple of 32

    hipEventRecord(start);
    matrixVectorMultiplication<<<blocksPerGrid, threadsPerBlock>>>(dev_a, dev_b, dev_c, n);
    hipEventRecord(end);
    hipEventSynchronize(end);

    float time = 0.0f;
    hipEventElapsedTime(&time, start, end);

    hipMemcpy(c, dev_c, sizeof(float) * n, hipMemcpyDeviceToHost);
    cout << "\nGPU Time Elapsed: " << time;

    // CPU reference: d = b * a.
    // BUG FIX: the original computed a[row*n+col]*b[col], treating the
    // length-n vector 'a' as the matrix (reading far past its allocation);
    // the kernel multiplies mat (=b) by the vector (=a). Accumulate in
    // float instead of the original int.
    for (int row = 0; row < n; row++) {
        float ref = 0.0f;
        for (int col = 0; col < n; col++) {
            ref += b[row * n + col] * a[col];
        }
        d[row] = ref;
    }

    // Compare with absolute differences so positive/negative errors
    // cannot cancel (the original summed signed diffs into an int).
    float error = 0.0f;
    for (int k = 0; k < n; k++) {
        error += fabsf(d[k] - c[k]);
        // cout << " gpu " << c[k] << " CPU " << d[k] << endl;
    }
    cout << "Error : " << error;

    // Release everything the original leaked.
    hipEventDestroy(start);
    hipEventDestroy(end);
    hipFree(dev_a);
    hipFree(dev_b);
    hipFree(dev_c);
    free(a); free(b); free(c); free(d);
    return 0;
}

// Fill a length-N array with 1.0f.
void init_array(float *a, const int N)
{
    for (int i = 0; i < N; i++)
        a[i] = 1.0f;
}

// Fill an N x M row-major matrix with 2.0f.
void init_mat(float *a, const int N, const int M)
{
    int i, j;
    for (i = 0; i < N; i++)
        for (j = 0; j < M; j++)
            a[i * M + j] = 2.0f;
}
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z26matrixVectorMultiplicationPfS_S_i .globl _Z26matrixVectorMultiplicationPfS_S_i .p2align 8 .type _Z26matrixVectorMultiplicationPfS_S_i,@function _Z26matrixVectorMultiplicationPfS_S_i: s_clause 0x1 s_load_b32 s3, s[0:1], 0x2c s_load_b32 s2, s[0:1], 0x18 s_waitcnt lgkmcnt(0) s_and_b32 s3, s3, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s3, v[0:1] v_max_i32_e32 v0, 0, v1 s_delay_alu instid0(VALU_DEP_1) v_cmp_gt_i32_e32 vcc_lo, s2, v0 v_mov_b32_e32 v0, 0 s_and_saveexec_b32 s3, vcc_lo s_cbranch_execz .LBB0_3 s_load_b128 s[4:7], s[0:1], 0x0 v_mul_lo_u32 v2, v1, s2 v_mov_b32_e32 v0, 0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v3, 31, v2 v_lshlrev_b64 v[2:3], 2, v[2:3] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v2, vcc_lo, s6, v2 v_add_co_ci_u32_e32 v3, vcc_lo, s7, v3, vcc_lo .LBB0_2: global_load_b32 v4, v[2:3], off s_load_b32 s6, s[4:5], 0x0 v_add_co_u32 v2, vcc_lo, v2, 4 s_add_i32 s2, s2, -1 v_add_co_ci_u32_e32 v3, vcc_lo, 0, v3, vcc_lo s_add_u32 s4, s4, 4 s_addc_u32 s5, s5, 0 s_cmp_lg_u32 s2, 0 s_waitcnt vmcnt(0) lgkmcnt(0) v_fmac_f32_e32 v0, s6, v4 s_cbranch_scc1 .LBB0_2 .LBB0_3: s_or_b32 exec_lo, exec_lo, s3 s_load_b64 s[0:1], s[0:1], 0x10 v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[1:2], 2, v[1:2] s_waitcnt lgkmcnt(0) v_add_co_u32 v1, vcc_lo, s0, v1 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v2, vcc_lo, s1, v2, vcc_lo global_store_b32 v[1:2], v0, off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z26matrixVectorMultiplicationPfS_S_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 15 
.amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 5 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z26matrixVectorMultiplicationPfS_S_i, .Lfunc_end0-_Z26matrixVectorMultiplicationPfS_S_i .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 
.size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z26matrixVectorMultiplicationPfS_S_i .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z26matrixVectorMultiplicationPfS_S_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 5 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include<iostream> #include<cstdlib> #include<cmath> #include<time.h> using namespace std; __global__ void matrixVectorMultiplication(float *a, float *mat, float *c, int n) { int row=threadIdx.x+blockDim.x*blockIdx.x; float sum=0; if(row<n){ for(int j=0;j<n;j++) { sum=sum+mat[row*n+j]*a[j]; } } c[row]=sum; } int main() { float *a, *b, *c, *d; float *dev_a, *dev_b, *dev_c; int n=32*1024; a=(float*)malloc(sizeof(float)*n); b=(float*)malloc(sizeof(float)*n*n); c=(float*)malloc(sizeof(float)*n); d=(float*)malloc(sizeof(float)*n); int i, j; for(i=0; i<n; i++) a[i] = 1.0; c[i] = 1.0; for(i=0; i<n; i++) for(j=0; j<n; j++) b[i*n+j] = 2.0; printf("<<<<<<<<<< initial data:\n"); hipMalloc((void**)&dev_a, sizeof(float)*n); hipMalloc((void**)&dev_b, sizeof(float)*n*n); hipMalloc((void**)&dev_c, sizeof(float)*n); hipMemcpy(dev_a, a, sizeof(float)*n, hipMemcpyHostToDevice); hipMemcpy(dev_b, b, sizeof(float)*n*n, hipMemcpyHostToDevice); hipEvent_t start,end; hipEventCreate(&start); hipEventCreate(&end); int threadsPerBlock; threadsPerBlock = 32; int blocksPerGrid; blocksPerGrid = n/threadsPerBlock; hipEventRecord(start); matrixVectorMultiplication<<<blocksPerGrid,threadsPerBlock>>>(dev_a,dev_b,dev_c,n); hipEventRecord(end); hipEventSynchronize(end); float time=0.0; hipEventElapsedTime(&time,start,end); hipMemcpy(c,dev_c,sizeof(float)*n,hipMemcpyDeviceToHost); cout<<"\nGPU Time Elapsed: "<<time; int sum=0; for(int row=0;row<n;row++) { sum=0; for(int col=0;col<n;col++) { sum=sum+a[row*n+col]*b[col]; } d[row]=sum; } //t=clock()-t; //cout<<"\nCPU Time Elapsed: "<<((double)t); //((double)t)/CLOCKS_PER_SEC; int error=0; for(int i=0;i<n;i++){ error+=d[i]-c[i]; // cout<<" gpu "<<c[i]<<" CPU "<<d[i]<<endl; } cout<<"Error : "<<error; return 0; }; void init_array(float *a, const int N) { int i; for(i=0; i<N; i++) a[i] = 1.0; } void init_mat(float *a, const int N, const int M) { int i, j; for(i=0; i<N; i++) for(j=0; j<M; j++) a[i*M+j] = 2.0; }
.text .file "matvec_cuda1.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .globl _Z41__device_stub__matrixVectorMultiplicationPfS_S_i # -- Begin function _Z41__device_stub__matrixVectorMultiplicationPfS_S_i .p2align 4, 0x90 .type _Z41__device_stub__matrixVectorMultiplicationPfS_S_i,@function _Z41__device_stub__matrixVectorMultiplicationPfS_S_i: # @_Z41__device_stub__matrixVectorMultiplicationPfS_S_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z26matrixVectorMultiplicationPfS_S_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z41__device_stub__matrixVectorMultiplicationPfS_S_i, .Lfunc_end0-_Z41__device_stub__matrixVectorMultiplicationPfS_S_i .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $152, %rsp .cfi_def_cfa_offset 208 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movabsq $4294967296, %r13 # imm = 0x100000000 movl $131072, %edi # imm = 0x20000 callq malloc movq %rax, %r15 movq %r13, %rdi callq malloc movq %rax, %r12 
movl $131072, %edi # imm = 0x20000 callq malloc movq %rax, %rbx movl $131072, %edi # imm = 0x20000 callq malloc movq %rax, %r14 xorl %eax, %eax .p2align 4, 0x90 .LBB1_1: # =>This Inner Loop Header: Depth=1 movl $1065353216, (%r15,%rax,4) # imm = 0x3F800000 incq %rax cmpq $32768, %rax # imm = 0x8000 jne .LBB1_1 # %bb.2: movl $1065353216, 131072(%rbx) # imm = 0x3F800000 xorl %eax, %eax movq %r12, %rcx .p2align 4, 0x90 .LBB1_3: # %.preheader75 # =>This Loop Header: Depth=1 # Child Loop BB1_4 Depth 2 xorl %edx, %edx .p2align 4, 0x90 .LBB1_4: # Parent Loop BB1_3 Depth=1 # => This Inner Loop Header: Depth=2 movl $1073741824, (%rcx,%rdx,4) # imm = 0x40000000 incq %rdx cmpq $32768, %rdx # imm = 0x8000 jne .LBB1_4 # %bb.5: # in Loop: Header=BB1_3 Depth=1 incq %rax addq $131072, %rcx # imm = 0x20000 cmpq $32768, %rax # imm = 0x8000 jne .LBB1_3 # %bb.6: movl $.Lstr, %edi callq puts@PLT leaq 40(%rsp), %rdi movl $131072, %esi # imm = 0x20000 callq hipMalloc leaq 32(%rsp), %rdi movq %r13, %rsi callq hipMalloc leaq 24(%rsp), %rdi movl $131072, %esi # imm = 0x20000 callq hipMalloc movq 40(%rsp), %rdi movl $131072, %edx # imm = 0x20000 movq %r15, %rsi movl $1, %ecx callq hipMemcpy movq 32(%rsp), %rdi movq %r12, %rsi movq %r13, %rdx movl $1, %ecx callq hipMemcpy leaq 16(%rsp), %rdi callq hipEventCreate movq %rsp, %rdi callq hipEventCreate movq 16(%rsp), %rdi xorl %esi, %esi callq hipEventRecord leaq 32(%r13), %rdx addq $1024, %r13 # imm = 0x400 movq %r13, %rdi movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_8 # %bb.7: movq 40(%rsp), %rax movq 32(%rsp), %rcx movq 24(%rsp), %rdx movq %rax, 144(%rsp) movq %rcx, 136(%rsp) movq %rdx, 128(%rsp) movl $32768, 12(%rsp) # imm = 0x8000 leaq 144(%rsp), %rax movq %rax, 48(%rsp) leaq 136(%rsp), %rax movq %rax, 56(%rsp) leaq 128(%rsp), %rax movq %rax, 64(%rsp) leaq 12(%rsp), %rax movq %rax, 72(%rsp) leaq 112(%rsp), %rdi leaq 96(%rsp), %rsi leaq 88(%rsp), %rdx leaq 80(%rsp), 
%rcx callq __hipPopCallConfiguration movq 112(%rsp), %rsi movl 120(%rsp), %edx movq 96(%rsp), %rcx movl 104(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z26matrixVectorMultiplicationPfS_S_i, %edi pushq 80(%rsp) .cfi_adjust_cfa_offset 8 pushq 96(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_8: movq (%rsp), %rdi xorl %r13d, %r13d xorl %esi, %esi callq hipEventRecord movq (%rsp), %rdi callq hipEventSynchronize movl $0, 48(%rsp) movq 16(%rsp), %rsi movq (%rsp), %rdx leaq 48(%rsp), %rdi callq hipEventElapsedTime movq 24(%rsp), %rsi movl $131072, %edx # imm = 0x20000 movq %rbx, %rdi movl $2, %ecx callq hipMemcpy movl $_ZSt4cout, %edi movl $.L.str.1, %esi movl $20, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movss 48(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $_ZSt4cout, %edi callq _ZNSo9_M_insertIdEERSoT_ .p2align 4, 0x90 .LBB1_9: # %.preheader74 # =>This Loop Header: Depth=1 # Child Loop BB1_10 Depth 2 xorl %eax, %eax xorl %ecx, %ecx .p2align 4, 0x90 .LBB1_10: # Parent Loop BB1_9 Depth=1 # => This Inner Loop Header: Depth=2 xorps %xmm0, %xmm0 cvtsi2ss %ecx, %xmm0 movss (%r15,%rax,4), %xmm1 # xmm1 = mem[0],zero,zero,zero mulss (%r12,%rax,4), %xmm1 addss %xmm0, %xmm1 cvttss2si %xmm1, %ecx incq %rax cmpq $32768, %rax # imm = 0x8000 jne .LBB1_10 # %bb.11: # in Loop: Header=BB1_9 Depth=1 xorps %xmm0, %xmm0 cvtsi2ss %ecx, %xmm0 movss %xmm0, (%r14,%r13,4) incq %r13 addq $131072, %r15 # imm = 0x20000 cmpq $32768, %r13 # imm = 0x8000 jne .LBB1_9 # %bb.12: # %.preheader.preheader xorl %eax, %eax xorl %ebp, %ebp .p2align 4, 0x90 .LBB1_13: # %.preheader # =>This Inner Loop Header: Depth=1 movss (%r14,%rax,4), %xmm0 # xmm0 = mem[0],zero,zero,zero subss (%rbx,%rax,4), %xmm0 xorps %xmm1, %xmm1 cvtsi2ss %ebp, %xmm1 addss %xmm0, %xmm1 cvttss2si %xmm1, %ebp incq %rax cmpq $32768, %rax # imm = 0x8000 jne .LBB1_13 # %bb.14: movl $_ZSt4cout, %edi movl $.L.str.2, %esi movl 
$8, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl $_ZSt4cout, %edi movl %ebp, %esi callq _ZNSolsEi xorl %eax, %eax addq $152, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .globl _Z10init_arrayPfi # -- Begin function _Z10init_arrayPfi .p2align 4, 0x90 .type _Z10init_arrayPfi,@function _Z10init_arrayPfi: # @_Z10init_arrayPfi .cfi_startproc # %bb.0: testl %esi, %esi jle .LBB2_3 # %bb.1: # %.lr.ph.preheader movl %esi, %eax xorl %ecx, %ecx .p2align 4, 0x90 .LBB2_2: # %.lr.ph # =>This Inner Loop Header: Depth=1 movl $1065353216, (%rdi,%rcx,4) # imm = 0x3F800000 incq %rcx cmpq %rcx, %rax jne .LBB2_2 .LBB2_3: # %._crit_edge retq .Lfunc_end2: .size _Z10init_arrayPfi, .Lfunc_end2-_Z10init_arrayPfi .cfi_endproc # -- End function .globl _Z8init_matPfii # -- Begin function _Z8init_matPfii .p2align 4, 0x90 .type _Z8init_matPfii,@function _Z8init_matPfii: # @_Z8init_matPfii .cfi_startproc # %bb.0: testl %esi, %esi jle .LBB3_6 # %bb.1: # %.preheader.lr.ph movl %esi, %eax movl %edx, %ecx xorl %esi, %esi xorl %r8d, %r8d jmp .LBB3_2 .p2align 4, 0x90 .LBB3_5: # %._crit_edge # in Loop: Header=BB3_2 Depth=1 incq %r8 addl %edx, %esi cmpq %rax, %r8 je .LBB3_6 .LBB3_2: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB3_4 Depth 2 testl %edx, %edx jle .LBB3_5 # %bb.3: # %.lr.ph # in Loop: Header=BB3_2 Depth=1 movl %esi, %r9d leaq (%rdi,%r9,4), %r9 xorl %r10d, %r10d .p2align 4, 0x90 .LBB3_4: # Parent Loop BB3_2 Depth=1 # => This Inner Loop Header: Depth=2 movl $1073741824, (%r9,%r10,4) # imm = 0x40000000 incq %r10 cmpq %r10, %rcx jne .LBB3_4 jmp .LBB3_5 .LBB3_6: # %._crit_edge13 retq .Lfunc_end3: .size _Z8init_matPfii, .Lfunc_end3-_Z8init_matPfii .cfi_endproc # -- End function 
.p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB4_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB4_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z26matrixVectorMultiplicationPfS_S_i, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end4: .size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB5_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB5_2: retq .Lfunc_end5: .size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor .cfi_endproc # -- End function .type _Z26matrixVectorMultiplicationPfS_S_i,@object # @_Z26matrixVectorMultiplicationPfS_S_i .section .rodata,"a",@progbits .globl _Z26matrixVectorMultiplicationPfS_S_i .p2align 3, 0x0 _Z26matrixVectorMultiplicationPfS_S_i: .quad _Z41__device_stub__matrixVectorMultiplicationPfS_S_i .size _Z26matrixVectorMultiplicationPfS_S_i, 8 .type .L.str.1,@object # @.str.1 .section .rodata.str1.1,"aMS",@progbits,1 .L.str.1: .asciz "\nGPU Time Elapsed: " .size .L.str.1, 21 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "Error : " .size .L.str.2, 9 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z26matrixVectorMultiplicationPfS_S_i" .size .L__unnamed_1, 38 .type __hip_fatbin_wrapper,@object # 
@__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "<<<<<<<<<< initial data:" .size .Lstr, 25 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z41__device_stub__matrixVectorMultiplicationPfS_S_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z26matrixVectorMultiplicationPfS_S_i .addrsig_sym _ZSt4cout .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z26matrixVectorMultiplicationPfS_S_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e220000002500 */ /*0020*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff067624 */ /* 0x000fe200078e00ff */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*0040*/ BSSY B0, 0xa80 ; /* 0x00000a3000007945 */ /* 0x000fe20003800000 */ /*0050*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e220000002100 */ /*0060*/ HFMA2.MMA R15, -RZ, RZ, 0, 0 ; /* 0x00000000ff0f7435 */ /* 0x000fe200000001ff */ /*0070*/ ISETP.GE.AND P0, PT, R6, 0x1, PT ; /* 0x000000010600780c */ /* 0x000fe20003f06270 */ /*0080*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x001fca00078e0203 */ /*0090*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x178], !P0 ; /* 0x00005e0000007a0c */ /* 0x000fda0004706670 */ /*00a0*/ @P0 BRA 0xa70 ; /* 0x000009c000000947 */ /* 0x000fea0003800000 */ /*00b0*/ IADD3 R2, R6.reuse, -0x1, RZ ; /* 0xffffffff06027810 */ /* 0x040fe20007ffe0ff */ /*00c0*/ IMAD.MOV.U32 R15, RZ, RZ, RZ ; /* 0x000000ffff0f7224 */ /* 0x000fe200078e00ff */ /*00d0*/ LOP3.LUT R6, R6, 0x3, RZ, 0xc0, !PT ; /* 0x0000000306067812 */ /* 0x000fe400078ec0ff */ /*00e0*/ ISETP.GE.U32.AND P0, PT, R2, 0x3, PT ; /* 0x000000030200780c */ /* 0x000fe40003f06070 */ /*00f0*/ MOV R7, RZ ; /* 0x000000ff00077202 */ /* 0x000fd60000000f00 */ /*0100*/ @!P0 BRA 0x930 ; /* 0x0000082000008947 */ /* 0x000fea0003800000 */ /*0110*/ IADD3 R8, -R6, c[0x0][0x178], RZ ; /* 0x00005e0006087a10 */ /* 0x000fe20007ffe1ff */ /*0120*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */ /* 0x000fe200078e00ff */ /*0130*/ HFMA2.MMA R15, -RZ, RZ, 0, 0 ; /* 0x00000000ff0f7435 */ /* 0x000fe200000001ff */ /*0140*/ IMAD R2, R0, c[0x0][0x178], RZ ; /* 0x00005e0000027a24 */ /* 
0x000fe200078e02ff */ /*0150*/ ISETP.GT.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */ /* 0x000fe20003f04270 */ /*0160*/ IMAD.MOV.U32 R7, RZ, RZ, RZ ; /* 0x000000ffff077224 */ /* 0x000fe200078e00ff */ /*0170*/ MOV R4, c[0x0][0x160] ; /* 0x0000580000047a02 */ /* 0x000fe20000000f00 */ /*0180*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff057624 */ /* 0x000fe400078e00ff */ /*0190*/ IMAD.WIDE R2, R2, R3, c[0x0][0x168] ; /* 0x00005a0002027625 */ /* 0x000fd000078e0203 */ /*01a0*/ @!P0 BRA 0x7b0 ; /* 0x0000060000008947 */ /* 0x000fea0003800000 */ /*01b0*/ ISETP.GT.AND P1, PT, R8, 0xc, PT ; /* 0x0000000c0800780c */ /* 0x000fe40003f24270 */ /*01c0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */ /* 0x000fd60003f0f070 */ /*01d0*/ @!P1 BRA 0x570 ; /* 0x0000039000009947 */ /* 0x000fea0003800000 */ /*01e0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*01f0*/ LDG.E R16, [R4.64] ; /* 0x0000000404107981 */ /* 0x000ea8000c1e1900 */ /*0200*/ LDG.E R17, [R2.64] ; /* 0x0000000402117981 */ /* 0x000ea8000c1e1900 */ /*0210*/ LDG.E R18, [R4.64+0x4] ; /* 0x0000040404127981 */ /* 0x000ee8000c1e1900 */ /*0220*/ LDG.E R25, [R2.64+0x4] ; /* 0x0000040402197981 */ /* 0x000ee8000c1e1900 */ /*0230*/ LDG.E R19, [R4.64+0x8] ; /* 0x0000080404137981 */ /* 0x000f28000c1e1900 */ /*0240*/ LDG.E R20, [R2.64+0x8] ; /* 0x0000080402147981 */ /* 0x000f28000c1e1900 */ /*0250*/ LDG.E R22, [R4.64+0xc] ; /* 0x00000c0404167981 */ /* 0x000f68000c1e1900 */ /*0260*/ LDG.E R21, [R2.64+0xc] ; /* 0x00000c0402157981 */ /* 0x000f68000c1e1900 */ /*0270*/ LDG.E R23, [R4.64+0x10] ; /* 0x0000100404177981 */ /* 0x000f68000c1e1900 */ /*0280*/ LDG.E R24, [R2.64+0x10] ; /* 0x0000100402187981 */ /* 0x000f68000c1e1900 */ /*0290*/ LDG.E R9, [R4.64+0x14] ; /* 0x0000140404097981 */ /* 0x000f68000c1e1900 */ /*02a0*/ LDG.E R10, [R2.64+0x14] ; /* 0x00001404020a7981 */ /* 0x000f68000c1e1900 */ /*02b0*/ LDG.E R11, [R4.64+0x18] ; /* 0x00001804040b7981 */ /* 
0x000f68000c1e1900 */ /*02c0*/ LDG.E R12, [R2.64+0x18] ; /* 0x00001804020c7981 */ /* 0x000f68000c1e1900 */ /*02d0*/ LDG.E R13, [R4.64+0x1c] ; /* 0x00001c04040d7981 */ /* 0x000f68000c1e1900 */ /*02e0*/ LDG.E R14, [R2.64+0x1c] ; /* 0x00001c04020e7981 */ /* 0x000f62000c1e1900 */ /*02f0*/ FFMA R17, R16, R17, R15 ; /* 0x0000001110117223 */ /* 0x004fc6000000000f */ /*0300*/ LDG.E R15, [R4.64+0x20] ; /* 0x00002004040f7981 */ /* 0x0000a8000c1e1900 */ /*0310*/ LDG.E R16, [R2.64+0x20] ; /* 0x0000200402107981 */ /* 0x0002a2000c1e1900 */ /*0320*/ FFMA R25, R18, R25, R17 ; /* 0x0000001912197223 */ /* 0x008fc60000000011 */ /*0330*/ LDG.E R17, [R4.64+0x24] ; /* 0x0000240404117981 */ /* 0x0000e8000c1e1900 */ /*0340*/ LDG.E R18, [R2.64+0x24] ; /* 0x0000240402127981 */ /* 0x0002e2000c1e1900 */ /*0350*/ FFMA R25, R19, R20, R25 ; /* 0x0000001413197223 */ /* 0x010fc60000000019 */ /*0360*/ LDG.E R19, [R4.64+0x28] ; /* 0x0000280404137981 */ /* 0x000128000c1e1900 */ /*0370*/ LDG.E R20, [R2.64+0x28] ; /* 0x0000280402147981 */ /* 0x000322000c1e1900 */ /*0380*/ FFMA R25, R22, R21, R25 ; /* 0x0000001516197223 */ /* 0x020fc60000000019 */ /*0390*/ LDG.E R21, [R4.64+0x2c] ; /* 0x00002c0404157981 */ /* 0x000168000c1e1900 */ /*03a0*/ LDG.E R22, [R2.64+0x2c] ; /* 0x00002c0402167981 */ /* 0x000362000c1e1900 */ /*03b0*/ FFMA R25, R23, R24, R25 ; /* 0x0000001817197223 */ /* 0x000fc60000000019 */ /*03c0*/ LDG.E R23, [R4.64+0x30] ; /* 0x0000300404177981 */ /* 0x000168000c1e1900 */ /*03d0*/ LDG.E R24, [R2.64+0x30] ; /* 0x0000300402187981 */ /* 0x000362000c1e1900 */ /*03e0*/ FFMA R25, R9, R10, R25 ; /* 0x0000000a09197223 */ /* 0x000fc60000000019 */ /*03f0*/ LDG.E R9, [R4.64+0x34] ; /* 0x0000340404097981 */ /* 0x000168000c1e1900 */ /*0400*/ LDG.E R10, [R2.64+0x34] ; /* 0x00003404020a7981 */ /* 0x000362000c1e1900 */ /*0410*/ FFMA R25, R11, R12, R25 ; /* 0x0000000c0b197223 */ /* 0x000fc60000000019 */ /*0420*/ LDG.E R11, [R4.64+0x38] ; /* 0x00003804040b7981 */ /* 0x000168000c1e1900 */ /*0430*/ LDG.E R12, 
[R2.64+0x38] ; /* 0x00003804020c7981 */ /* 0x000362000c1e1900 */ /*0440*/ FFMA R25, R13, R14, R25 ; /* 0x0000000e0d197223 */ /* 0x000fc60000000019 */ /*0450*/ LDG.E R13, [R4.64+0x3c] ; /* 0x00003c04040d7981 */ /* 0x000168000c1e1900 */ /*0460*/ LDG.E R14, [R2.64+0x3c] ; /* 0x00003c04020e7981 */ /* 0x000362000c1e1900 */ /*0470*/ IADD3 R8, R8, -0x10, RZ ; /* 0xfffffff008087810 */ /* 0x000fe40007ffe0ff */ /*0480*/ IADD3 R7, R7, 0x10, RZ ; /* 0x0000001007077810 */ /* 0x000fe40007ffe0ff */ /*0490*/ ISETP.GT.AND P1, PT, R8, 0xc, PT ; /* 0x0000000c0800780c */ /* 0x000fe40003f24270 */ /*04a0*/ IADD3 R4, P3, R4, 0x40, RZ ; /* 0x0000004004047810 */ /* 0x001fc40007f7e0ff */ /*04b0*/ IADD3 R2, P2, R2, 0x40, RZ ; /* 0x0000004002027810 */ /* 0x002fe40007f5e0ff */ /*04c0*/ IADD3.X R5, RZ, R5, RZ, P3, !PT ; /* 0x00000005ff057210 */ /* 0x000fc60001ffe4ff */ /*04d0*/ IMAD.X R3, RZ, RZ, R3, P2 ; /* 0x000000ffff037224 */ /* 0x000fe400010e0603 */ /*04e0*/ FFMA R15, R15, R16, R25 ; /* 0x000000100f0f7223 */ /* 0x004fc80000000019 */ /*04f0*/ FFMA R15, R17, R18, R15 ; /* 0x00000012110f7223 */ /* 0x008fc8000000000f */ /*0500*/ FFMA R15, R19, R20, R15 ; /* 0x00000014130f7223 */ /* 0x010fc8000000000f */ /*0510*/ FFMA R15, R21, R22, R15 ; /* 0x00000016150f7223 */ /* 0x020fc8000000000f */ /*0520*/ FFMA R15, R23, R24, R15 ; /* 0x00000018170f7223 */ /* 0x000fc8000000000f */ /*0530*/ FFMA R9, R9, R10, R15 ; /* 0x0000000a09097223 */ /* 0x000fc8000000000f */ /*0540*/ FFMA R9, R11, R12, R9 ; /* 0x0000000c0b097223 */ /* 0x000fc80000000009 */ /*0550*/ FFMA R15, R13, R14, R9 ; /* 0x0000000e0d0f7223 */ /* 0x000fe20000000009 */ /*0560*/ @P1 BRA 0x1f0 ; /* 0xfffffc8000001947 */ /* 0x000fea000383ffff */ /*0570*/ ISETP.GT.AND P1, PT, R8, 0x4, PT ; /* 0x000000040800780c */ /* 0x000fda0003f24270 */ /*0580*/ @!P1 BRA 0x790 ; /* 0x0000020000009947 */ /* 0x000fea0003800000 */ /*0590*/ LDG.E R16, [R4.64] ; /* 0x0000000404107981 */ /* 0x0000a8000c1e1900 */ /*05a0*/ LDG.E R17, [R2.64] ; /* 0x0000000402117981 */ /* 
0x000ea8000c1e1900 */ /*05b0*/ LDG.E R19, [R4.64+0x4] ; /* 0x0000040404137981 */ /* 0x0000e8000c1e1900 */ /*05c0*/ LDG.E R18, [R2.64+0x4] ; /* 0x0000040402127981 */ /* 0x000ee8000c1e1900 */ /*05d0*/ LDG.E R21, [R4.64+0x8] ; /* 0x0000080404157981 */ /* 0x000128000c1e1900 */ /*05e0*/ LDG.E R20, [R2.64+0x8] ; /* 0x0000080402147981 */ /* 0x000f28000c1e1900 */ /*05f0*/ LDG.E R23, [R4.64+0xc] ; /* 0x00000c0404177981 */ /* 0x000168000c1e1900 */ /*0600*/ LDG.E R22, [R2.64+0xc] ; /* 0x00000c0402167981 */ /* 0x000f68000c1e1900 */ /*0610*/ LDG.E R25, [R4.64+0x10] ; /* 0x0000100404197981 */ /* 0x000168000c1e1900 */ /*0620*/ LDG.E R24, [R2.64+0x10] ; /* 0x0000100402187981 */ /* 0x000f68000c1e1900 */ /*0630*/ LDG.E R13, [R4.64+0x14] ; /* 0x00001404040d7981 */ /* 0x000168000c1e1900 */ /*0640*/ LDG.E R14, [R2.64+0x14] ; /* 0x00001404020e7981 */ /* 0x000f68000c1e1900 */ /*0650*/ LDG.E R11, [R4.64+0x18] ; /* 0x00001804040b7981 */ /* 0x000168000c1e1900 */ /*0660*/ LDG.E R12, [R2.64+0x18] ; /* 0x00001804020c7981 */ /* 0x000f68000c1e1900 */ /*0670*/ LDG.E R9, [R4.64+0x1c] ; /* 0x00001c0404097981 */ /* 0x000168000c1e1900 */ /*0680*/ LDG.E R10, [R2.64+0x1c] ; /* 0x00001c04020a7981 */ /* 0x000362000c1e1900 */ /*0690*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fc40003f0e170 */ /*06a0*/ IADD3 R7, R7, 0x8, RZ ; /* 0x0000000807077810 */ /* 0x000fe40007ffe0ff */ /*06b0*/ IADD3 R8, R8, -0x8, RZ ; /* 0xfffffff808087810 */ /* 0x000fe40007ffe0ff */ /*06c0*/ IADD3 R4, P2, R4, 0x20, RZ ; /* 0x0000002004047810 */ /* 0x001fca0007f5e0ff */ /*06d0*/ IMAD.X R5, RZ, RZ, R5, P2 ; /* 0x000000ffff057224 */ /* 0x000fe400010e0605 */ /*06e0*/ FFMA R16, R16, R17, R15 ; /* 0x0000001110107223 */ /* 0x004fc8000000000f */ /*06f0*/ FFMA R16, R19, R18, R16 ; /* 0x0000001213107223 */ /* 0x008fc80000000010 */ /*0700*/ FFMA R16, R21, R20, R16 ; /* 0x0000001415107223 */ /* 0x010fc80000000010 */ /*0710*/ FFMA R16, R23, R22, R16 ; /* 0x0000001617107223 */ /* 0x020fc80000000010 */ /*0720*/ 
FFMA R16, R25, R24, R16 ; /* 0x0000001819107223 */ /* 0x000fc80000000010 */ /*0730*/ FFMA R13, R13, R14, R16 ; /* 0x0000000e0d0d7223 */ /* 0x000fc80000000010 */ /*0740*/ FFMA R11, R11, R12, R13 ; /* 0x0000000c0b0b7223 */ /* 0x000fe2000000000d */ /*0750*/ IADD3 R12, P1, R2, 0x20, RZ ; /* 0x00000020020c7810 */ /* 0x000fc80007f3e0ff */ /*0760*/ IADD3.X R3, RZ, R3, RZ, P1, !PT ; /* 0x00000003ff037210 */ /* 0x002fe40000ffe4ff */ /*0770*/ MOV R2, R12 ; /* 0x0000000c00027202 */ /* 0x000fe20000000f00 */ /*0780*/ FFMA R15, R9, R10, R11 ; /* 0x0000000a090f7223 */ /* 0x000fe4000000000b */ /*0790*/ ISETP.NE.OR P0, PT, R8, RZ, P0 ; /* 0x000000ff0800720c */ /* 0x000fda0000705670 */ /*07a0*/ @!P0 BRA 0x930 ; /* 0x0000018000008947 */ /* 0x000fea0003800000 */ /*07b0*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000ea8000c1e1900 */ /*07c0*/ LDG.E R9, [R2.64] ; /* 0x0000000402097981 */ /* 0x000ea8000c1e1900 */ /*07d0*/ LDG.E R12, [R4.64+0x4] ; /* 0x00000404040c7981 */ /* 0x0000e8000c1e1900 */ /*07e0*/ LDG.E R11, [R2.64+0x4] ; /* 0x00000404020b7981 */ /* 0x000ee8000c1e1900 */ /*07f0*/ LDG.E R14, [R4.64+0x8] ; /* 0x00000804040e7981 */ /* 0x000128000c1e1900 */ /*0800*/ LDG.E R13, [R2.64+0x8] ; /* 0x00000804020d7981 */ /* 0x000328000c1e1900 */ /*0810*/ LDG.E R16, [R4.64+0xc] ; /* 0x00000c0404107981 */ /* 0x000168000c1e1900 */ /*0820*/ LDG.E R17, [R2.64+0xc] ; /* 0x00000c0402117981 */ /* 0x000362000c1e1900 */ /*0830*/ IADD3 R8, R8, -0x4, RZ ; /* 0xfffffffc08087810 */ /* 0x000fc40007ffe0ff */ /*0840*/ IADD3 R7, R7, 0x4, RZ ; /* 0x0000000407077810 */ /* 0x000fe40007ffe0ff */ /*0850*/ ISETP.NE.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */ /* 0x000fe20003f05270 */ /*0860*/ FFMA R9, R10, R9, R15 ; /* 0x000000090a097223 */ /* 0x004fe2000000000f */ /*0870*/ IADD3 R10, P2, R4, 0x10, RZ ; /* 0x00000010040a7810 */ /* 0x000fca0007f5e0ff */ /*0880*/ IMAD.MOV.U32 R4, RZ, RZ, R10 ; /* 0x000000ffff047224 */ /* 0x001fe400078e000a */ /*0890*/ FFMA R9, R12, R11, R9 ; /* 0x0000000b0c097223 */ /* 
0x008fe20000000009 */ /*08a0*/ IADD3 R12, P1, R2, 0x10, RZ ; /* 0x00000010020c7810 */ /* 0x000fe20007f3e0ff */ /*08b0*/ IMAD.X R11, RZ, RZ, R5, P2 ; /* 0x000000ffff0b7224 */ /* 0x000fc600010e0605 */ /*08c0*/ MOV R2, R12 ; /* 0x0000000c00027202 */ /* 0x002fe40000000f00 */ /*08d0*/ MOV R5, R11 ; /* 0x0000000b00057202 */ /* 0x000fe20000000f00 */ /*08e0*/ FFMA R9, R14, R13, R9 ; /* 0x0000000d0e097223 */ /* 0x010fe20000000009 */ /*08f0*/ IADD3.X R13, RZ, R3, RZ, P1, !PT ; /* 0x00000003ff0d7210 */ /* 0x000fca0000ffe4ff */ /*0900*/ IMAD.MOV.U32 R3, RZ, RZ, R13 ; /* 0x000000ffff037224 */ /* 0x000fe400078e000d */ /*0910*/ FFMA R15, R16, R17, R9 ; /* 0x00000011100f7223 */ /* 0x020fe20000000009 */ /*0920*/ @P0 BRA 0x7b0 ; /* 0xfffffe8000000947 */ /* 0x000fea000383ffff */ /*0930*/ ISETP.NE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */ /* 0x000fda0003f05270 */ /*0940*/ @!P0 BRA 0xa70 ; /* 0x0000012000008947 */ /* 0x000fea0003800000 */ /*0950*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */ /* 0x000fe200000001ff */ /*0960*/ IMAD R4, R0, c[0x0][0x178], R7 ; /* 0x00005e0000047a24 */ /* 0x000fd200078e0207 */ /*0970*/ IMAD.WIDE R2, R7, R5, c[0x0][0x160] ; /* 0x0000580007027625 */ /* 0x000fc800078e0205 */ /*0980*/ IMAD.WIDE R4, R4, R5, c[0x0][0x168] ; /* 0x00005a0004047625 */ /* 0x000fe200078e0205 */ /*0990*/ MOV R8, R2 ; /* 0x0000000200087202 */ /* 0x000fc60000000f00 */ /*09a0*/ IMAD.MOV.U32 R7, RZ, RZ, R4 ; /* 0x000000ffff077224 */ /* 0x000fc600078e0004 */ /*09b0*/ MOV R2, R8 ; /* 0x0000000800027202 */ /* 0x000fe40000000f00 */ /*09c0*/ MOV R4, R7 ; /* 0x0000000700047202 */ /* 0x000fc80000000f00 */ /*09d0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x0000a8000c1e1900 */ /*09e0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x0002a2000c1e1900 */ /*09f0*/ IADD3 R6, R6, -0x1, RZ ; /* 0xffffffff06067810 */ /* 0x000fe40007ffe0ff */ /*0a00*/ IADD3 R8, P1, R8, 0x4, RZ ; /* 0x0000000408087810 */ /* 0x000fe40007f3e0ff */ /*0a10*/ ISETP.NE.AND P0, PT, 
R6, RZ, PT ; /* 0x000000ff0600720c */ /* 0x000fe40003f05270 */ /*0a20*/ IADD3 R7, P2, R7, 0x4, RZ ; /* 0x0000000407077810 */ /* 0x000fe20007f5e0ff */ /*0a30*/ IMAD.X R3, RZ, RZ, R3, P1 ; /* 0x000000ffff037224 */ /* 0x001fc600008e0603 */ /*0a40*/ IADD3.X R5, RZ, R5, RZ, P2, !PT ; /* 0x00000005ff057210 */ /* 0x002fe200017fe4ff */ /*0a50*/ FFMA R15, R2, R4, R15 ; /* 0x00000004020f7223 */ /* 0x004fcc000000000f */ /*0a60*/ @P0 BRA 0x9b0 ; /* 0xffffff4000000947 */ /* 0x000fea000383ffff */ /*0a70*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0a80*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */ /* 0x000fd400000001ff */ /*0a90*/ IMAD.WIDE R2, R0, R3, c[0x0][0x170] ; /* 0x00005c0000027625 */ /* 0x000fca00078e0203 */ /*0aa0*/ STG.E [R2.64], R15 ; /* 0x0000000f02007986 */ /* 0x000fe2000c101904 */ /*0ab0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0ac0*/ BRA 0xac0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0ad0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0ae0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0af0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0b00*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0b10*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0b20*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0b30*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0b40*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0b50*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0b60*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0b70*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z26matrixVectorMultiplicationPfS_S_i .globl _Z26matrixVectorMultiplicationPfS_S_i .p2align 8 .type _Z26matrixVectorMultiplicationPfS_S_i,@function _Z26matrixVectorMultiplicationPfS_S_i: s_clause 0x1 s_load_b32 s3, s[0:1], 0x2c s_load_b32 s2, s[0:1], 0x18 s_waitcnt lgkmcnt(0) s_and_b32 s3, s3, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s3, v[0:1] v_max_i32_e32 v0, 0, v1 s_delay_alu instid0(VALU_DEP_1) v_cmp_gt_i32_e32 vcc_lo, s2, v0 v_mov_b32_e32 v0, 0 s_and_saveexec_b32 s3, vcc_lo s_cbranch_execz .LBB0_3 s_load_b128 s[4:7], s[0:1], 0x0 v_mul_lo_u32 v2, v1, s2 v_mov_b32_e32 v0, 0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v3, 31, v2 v_lshlrev_b64 v[2:3], 2, v[2:3] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v2, vcc_lo, s6, v2 v_add_co_ci_u32_e32 v3, vcc_lo, s7, v3, vcc_lo .LBB0_2: global_load_b32 v4, v[2:3], off s_load_b32 s6, s[4:5], 0x0 v_add_co_u32 v2, vcc_lo, v2, 4 s_add_i32 s2, s2, -1 v_add_co_ci_u32_e32 v3, vcc_lo, 0, v3, vcc_lo s_add_u32 s4, s4, 4 s_addc_u32 s5, s5, 0 s_cmp_lg_u32 s2, 0 s_waitcnt vmcnt(0) lgkmcnt(0) v_fmac_f32_e32 v0, s6, v4 s_cbranch_scc1 .LBB0_2 .LBB0_3: s_or_b32 exec_lo, exec_lo, s3 s_load_b64 s[0:1], s[0:1], 0x10 v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[1:2], 2, v[1:2] s_waitcnt lgkmcnt(0) v_add_co_u32 v1, vcc_lo, s0, v1 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v2, vcc_lo, s1, v2, vcc_lo global_store_b32 v[1:2], v0, off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z26matrixVectorMultiplicationPfS_S_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 15 
.amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 5 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z26matrixVectorMultiplicationPfS_S_i, .Lfunc_end0-_Z26matrixVectorMultiplicationPfS_S_i .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 
.size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z26matrixVectorMultiplicationPfS_S_i .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z26matrixVectorMultiplicationPfS_S_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 5 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00188c7b_00000000-6_matvec_cuda1.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB3674: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3674: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z10init_arrayPfi .type _Z10init_arrayPfi, @function _Z10init_arrayPfi: .LFB3670: .cfi_startproc endbr64 testl %esi, %esi jle .L3 movq %rdi, %rax movslq %esi, %rsi leaq (%rdi,%rsi,4), %rdx movss .LC0(%rip), %xmm0 .L5: movss %xmm0, (%rax) addq $4, %rax cmpq %rdx, %rax jne .L5 .L3: ret .cfi_endproc .LFE3670: .size _Z10init_arrayPfi, .-_Z10init_arrayPfi .globl _Z8init_matPfii .type _Z8init_matPfii, @function _Z8init_matPfii: .LFB3671: .cfi_startproc endbr64 testl %esi, %esi jle .L7 movl $0, %r9d movl $0, %r8d movslq %edx, %r10 movss .LC1(%rip), %xmm0 jmp .L9 .L11: movslq %r9d, %rcx leaq (%rdi,%rcx,4), %rax addq %r10, %rcx leaq (%rdi,%rcx,4), %rcx .L10: movss %xmm0, (%rax) addq $4, %rax cmpq %rcx, %rax jne .L10 .L12: addl $1, %r8d addl %edx, %r9d cmpl %r8d, %esi je .L7 .L9: testl %edx, %edx jg .L11 jmp .L12 .L7: ret .cfi_endproc .LFE3671: .size _Z8init_matPfii, .-_Z8init_matPfii .globl _Z51__device_stub__Z26matrixVectorMultiplicationPfS_S_iPfS_S_i .type _Z51__device_stub__Z26matrixVectorMultiplicationPfS_S_iPfS_S_i, @function _Z51__device_stub__Z26matrixVectorMultiplicationPfS_S_iPfS_S_i: .LFB3696: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) 
movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L18 .L14: movq 136(%rsp), %rax subq %fs:40, %rax jne .L19 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L18: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z26matrixVectorMultiplicationPfS_S_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L14 .L19: call __stack_chk_fail@PLT .cfi_endproc .LFE3696: .size _Z51__device_stub__Z26matrixVectorMultiplicationPfS_S_iPfS_S_i, .-_Z51__device_stub__Z26matrixVectorMultiplicationPfS_S_iPfS_S_i .globl _Z26matrixVectorMultiplicationPfS_S_i .type _Z26matrixVectorMultiplicationPfS_S_i, @function _Z26matrixVectorMultiplicationPfS_S_i: .LFB3697: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z51__device_stub__Z26matrixVectorMultiplicationPfS_S_iPfS_S_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3697: .size _Z26matrixVectorMultiplicationPfS_S_i, .-_Z26matrixVectorMultiplicationPfS_S_i .section .rodata.str1.1,"aMS",@progbits,1 .LC2: .string "<<<<<<<<<< initial data:\n" .LC4: .string "\nGPU Time Elapsed: " .LC5: .string "Error : " .text .globl main .type main, @function main: .LFB3669: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $88, %rsp .cfi_def_cfa_offset 144 movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax movl $131072, %edi call malloc@PLT movq %rax, %r14 movabsq $4294967296, %rdi call 
malloc@PLT movq %rax, %rbp movl $131072, %edi call malloc@PLT movq %rax, %r13 movl $131072, %edi call malloc@PLT movq %rax, %r12 movq %r14, %rbx leaq 131072(%r14), %rdx movq %r14, %rax movss .LC0(%rip), %xmm0 .L23: movss %xmm0, (%rax) addq $4, %rax cmpq %rdx, %rax jne .L23 movl $0x3f800000, 131072(%r13) leaq 131072(%rbp), %rdx movabsq $4295098368, %rcx addq %rbp, %rcx movss .LC1(%rip), %xmm0 .L24: leaq -131072(%rdx), %rax .L25: movss %xmm0, (%rax) addq $4, %rax cmpq %rdx, %rax jne .L25 addq $131072, %rdx cmpq %rcx, %rdx jne .L24 leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq 8(%rsp), %rdi movl $131072, %esi call cudaMalloc@PLT leaq 16(%rsp), %rdi movabsq $4294967296, %r15 movq %r15, %rsi call cudaMalloc@PLT leaq 24(%rsp), %rdi movl $131072, %esi call cudaMalloc@PLT movl $1, %ecx movl $131072, %edx movq %r14, %rsi movq 8(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movq %r15, %rdx movq %rbp, %rsi movq 16(%rsp), %rdi call cudaMemcpy@PLT leaq 32(%rsp), %rdi call cudaEventCreate@PLT leaq 40(%rsp), %rdi call cudaEventCreate@PLT movl $0, %esi movq 32(%rsp), %rdi call cudaEventRecord@PLT movl $32, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1024, 48(%rsp) movl $1, 52(%rsp) movl $0, %r9d movl $0, %r8d movq 60(%rsp), %rdx movl $1, %ecx movq 48(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L38 .L27: movl $0, %esi movq 40(%rsp), %rdi call cudaEventRecord@PLT movq 40(%rsp), %rdi call cudaEventSynchronize@PLT movl $0x00000000, 60(%rsp) leaq 60(%rsp), %rdi movq 40(%rsp), %rdx movq 32(%rsp), %rsi call cudaEventElapsedTime@PLT movl $2, %ecx movl $131072, %edx movq 24(%rsp), %rsi movq %r13, %rdi call cudaMemcpy@PLT leaq .LC4(%rip), %rsi leaq _ZSt4cout(%rip), %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi pxor %xmm0, %xmm0 cvtss2sd 60(%rsp), %xmm0 call _ZNSo9_M_insertIdEERSoT_@PLT movl $0, %ecx movl $0, %esi .L28: movl $0, %eax movl %esi, %edx .L29: movss (%rbx,%rax), 
%xmm0 mulss 0(%rbp,%rax), %xmm0 pxor %xmm1, %xmm1 cvtsi2ssl %edx, %xmm1 addss %xmm1, %xmm0 cvttss2sil %xmm0, %edx addq $4, %rax cmpq $131072, %rax jne .L29 pxor %xmm0, %xmm0 cvtsi2ssl %edx, %xmm0 movss %xmm0, (%r12,%rcx,4) addq $1, %rcx addq $131072, %rbx cmpq $32768, %rcx jne .L28 movl $0, %eax movl $0, %ebx .L30: movss (%r12,%rax), %xmm0 subss 0(%r13,%rax), %xmm0 pxor %xmm1, %xmm1 cvtsi2ssl %ebx, %xmm1 addss %xmm1, %xmm0 cvttss2sil %xmm0, %ebx addq $4, %rax cmpq $131072, %rax jne .L30 leaq .LC5(%rip), %rsi leaq _ZSt4cout(%rip), %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi movl %ebx, %esi call _ZNSolsEi@PLT movq 72(%rsp), %rax subq %fs:40, %rax jne .L39 movl $0, %eax addq $88, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L38: .cfi_restore_state movl $32768, %ecx movq 24(%rsp), %rdx movq 16(%rsp), %rsi movq 8(%rsp), %rdi call _Z51__device_stub__Z26matrixVectorMultiplicationPfS_S_iPfS_S_i jmp .L27 .L39: call __stack_chk_fail@PLT .cfi_endproc .LFE3669: .size main, .-main .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC6: .string "_Z26matrixVectorMultiplicationPfS_S_i" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB3699: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC6(%rip), %rdx movq %rdx, %rcx leaq _Z26matrixVectorMultiplicationPfS_S_i(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call 
__cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3699: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst4,"aM",@progbits,4 .align 4 .LC0: .long 1065353216 .align 4 .LC1: .long 1073741824 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "matvec_cuda1.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .globl _Z41__device_stub__matrixVectorMultiplicationPfS_S_i # -- Begin function _Z41__device_stub__matrixVectorMultiplicationPfS_S_i .p2align 4, 0x90 .type _Z41__device_stub__matrixVectorMultiplicationPfS_S_i,@function _Z41__device_stub__matrixVectorMultiplicationPfS_S_i: # @_Z41__device_stub__matrixVectorMultiplicationPfS_S_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z26matrixVectorMultiplicationPfS_S_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z41__device_stub__matrixVectorMultiplicationPfS_S_i, .Lfunc_end0-_Z41__device_stub__matrixVectorMultiplicationPfS_S_i .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $152, %rsp .cfi_def_cfa_offset 208 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movabsq $4294967296, %r13 # imm = 0x100000000 movl $131072, %edi # imm = 0x20000 callq malloc movq %rax, %r15 movq %r13, %rdi callq malloc movq %rax, %r12 
movl $131072, %edi # imm = 0x20000 callq malloc movq %rax, %rbx movl $131072, %edi # imm = 0x20000 callq malloc movq %rax, %r14 xorl %eax, %eax .p2align 4, 0x90 .LBB1_1: # =>This Inner Loop Header: Depth=1 movl $1065353216, (%r15,%rax,4) # imm = 0x3F800000 incq %rax cmpq $32768, %rax # imm = 0x8000 jne .LBB1_1 # %bb.2: movl $1065353216, 131072(%rbx) # imm = 0x3F800000 xorl %eax, %eax movq %r12, %rcx .p2align 4, 0x90 .LBB1_3: # %.preheader75 # =>This Loop Header: Depth=1 # Child Loop BB1_4 Depth 2 xorl %edx, %edx .p2align 4, 0x90 .LBB1_4: # Parent Loop BB1_3 Depth=1 # => This Inner Loop Header: Depth=2 movl $1073741824, (%rcx,%rdx,4) # imm = 0x40000000 incq %rdx cmpq $32768, %rdx # imm = 0x8000 jne .LBB1_4 # %bb.5: # in Loop: Header=BB1_3 Depth=1 incq %rax addq $131072, %rcx # imm = 0x20000 cmpq $32768, %rax # imm = 0x8000 jne .LBB1_3 # %bb.6: movl $.Lstr, %edi callq puts@PLT leaq 40(%rsp), %rdi movl $131072, %esi # imm = 0x20000 callq hipMalloc leaq 32(%rsp), %rdi movq %r13, %rsi callq hipMalloc leaq 24(%rsp), %rdi movl $131072, %esi # imm = 0x20000 callq hipMalloc movq 40(%rsp), %rdi movl $131072, %edx # imm = 0x20000 movq %r15, %rsi movl $1, %ecx callq hipMemcpy movq 32(%rsp), %rdi movq %r12, %rsi movq %r13, %rdx movl $1, %ecx callq hipMemcpy leaq 16(%rsp), %rdi callq hipEventCreate movq %rsp, %rdi callq hipEventCreate movq 16(%rsp), %rdi xorl %esi, %esi callq hipEventRecord leaq 32(%r13), %rdx addq $1024, %r13 # imm = 0x400 movq %r13, %rdi movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_8 # %bb.7: movq 40(%rsp), %rax movq 32(%rsp), %rcx movq 24(%rsp), %rdx movq %rax, 144(%rsp) movq %rcx, 136(%rsp) movq %rdx, 128(%rsp) movl $32768, 12(%rsp) # imm = 0x8000 leaq 144(%rsp), %rax movq %rax, 48(%rsp) leaq 136(%rsp), %rax movq %rax, 56(%rsp) leaq 128(%rsp), %rax movq %rax, 64(%rsp) leaq 12(%rsp), %rax movq %rax, 72(%rsp) leaq 112(%rsp), %rdi leaq 96(%rsp), %rsi leaq 88(%rsp), %rdx leaq 80(%rsp), 
%rcx callq __hipPopCallConfiguration movq 112(%rsp), %rsi movl 120(%rsp), %edx movq 96(%rsp), %rcx movl 104(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z26matrixVectorMultiplicationPfS_S_i, %edi pushq 80(%rsp) .cfi_adjust_cfa_offset 8 pushq 96(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_8: movq (%rsp), %rdi xorl %r13d, %r13d xorl %esi, %esi callq hipEventRecord movq (%rsp), %rdi callq hipEventSynchronize movl $0, 48(%rsp) movq 16(%rsp), %rsi movq (%rsp), %rdx leaq 48(%rsp), %rdi callq hipEventElapsedTime movq 24(%rsp), %rsi movl $131072, %edx # imm = 0x20000 movq %rbx, %rdi movl $2, %ecx callq hipMemcpy movl $_ZSt4cout, %edi movl $.L.str.1, %esi movl $20, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movss 48(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $_ZSt4cout, %edi callq _ZNSo9_M_insertIdEERSoT_ .p2align 4, 0x90 .LBB1_9: # %.preheader74 # =>This Loop Header: Depth=1 # Child Loop BB1_10 Depth 2 xorl %eax, %eax xorl %ecx, %ecx .p2align 4, 0x90 .LBB1_10: # Parent Loop BB1_9 Depth=1 # => This Inner Loop Header: Depth=2 xorps %xmm0, %xmm0 cvtsi2ss %ecx, %xmm0 movss (%r15,%rax,4), %xmm1 # xmm1 = mem[0],zero,zero,zero mulss (%r12,%rax,4), %xmm1 addss %xmm0, %xmm1 cvttss2si %xmm1, %ecx incq %rax cmpq $32768, %rax # imm = 0x8000 jne .LBB1_10 # %bb.11: # in Loop: Header=BB1_9 Depth=1 xorps %xmm0, %xmm0 cvtsi2ss %ecx, %xmm0 movss %xmm0, (%r14,%r13,4) incq %r13 addq $131072, %r15 # imm = 0x20000 cmpq $32768, %r13 # imm = 0x8000 jne .LBB1_9 # %bb.12: # %.preheader.preheader xorl %eax, %eax xorl %ebp, %ebp .p2align 4, 0x90 .LBB1_13: # %.preheader # =>This Inner Loop Header: Depth=1 movss (%r14,%rax,4), %xmm0 # xmm0 = mem[0],zero,zero,zero subss (%rbx,%rax,4), %xmm0 xorps %xmm1, %xmm1 cvtsi2ss %ebp, %xmm1 addss %xmm0, %xmm1 cvttss2si %xmm1, %ebp incq %rax cmpq $32768, %rax # imm = 0x8000 jne .LBB1_13 # %bb.14: movl $_ZSt4cout, %edi movl $.L.str.2, %esi movl 
$8, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl $_ZSt4cout, %edi movl %ebp, %esi callq _ZNSolsEi xorl %eax, %eax addq $152, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .globl _Z10init_arrayPfi # -- Begin function _Z10init_arrayPfi .p2align 4, 0x90 .type _Z10init_arrayPfi,@function _Z10init_arrayPfi: # @_Z10init_arrayPfi .cfi_startproc # %bb.0: testl %esi, %esi jle .LBB2_3 # %bb.1: # %.lr.ph.preheader movl %esi, %eax xorl %ecx, %ecx .p2align 4, 0x90 .LBB2_2: # %.lr.ph # =>This Inner Loop Header: Depth=1 movl $1065353216, (%rdi,%rcx,4) # imm = 0x3F800000 incq %rcx cmpq %rcx, %rax jne .LBB2_2 .LBB2_3: # %._crit_edge retq .Lfunc_end2: .size _Z10init_arrayPfi, .Lfunc_end2-_Z10init_arrayPfi .cfi_endproc # -- End function .globl _Z8init_matPfii # -- Begin function _Z8init_matPfii .p2align 4, 0x90 .type _Z8init_matPfii,@function _Z8init_matPfii: # @_Z8init_matPfii .cfi_startproc # %bb.0: testl %esi, %esi jle .LBB3_6 # %bb.1: # %.preheader.lr.ph movl %esi, %eax movl %edx, %ecx xorl %esi, %esi xorl %r8d, %r8d jmp .LBB3_2 .p2align 4, 0x90 .LBB3_5: # %._crit_edge # in Loop: Header=BB3_2 Depth=1 incq %r8 addl %edx, %esi cmpq %rax, %r8 je .LBB3_6 .LBB3_2: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB3_4 Depth 2 testl %edx, %edx jle .LBB3_5 # %bb.3: # %.lr.ph # in Loop: Header=BB3_2 Depth=1 movl %esi, %r9d leaq (%rdi,%r9,4), %r9 xorl %r10d, %r10d .p2align 4, 0x90 .LBB3_4: # Parent Loop BB3_2 Depth=1 # => This Inner Loop Header: Depth=2 movl $1073741824, (%r9,%r10,4) # imm = 0x40000000 incq %r10 cmpq %r10, %rcx jne .LBB3_4 jmp .LBB3_5 .LBB3_6: # %._crit_edge13 retq .Lfunc_end3: .size _Z8init_matPfii, .Lfunc_end3-_Z8init_matPfii .cfi_endproc # -- End function 
.p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB4_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB4_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z26matrixVectorMultiplicationPfS_S_i, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end4: .size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB5_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB5_2: retq .Lfunc_end5: .size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor .cfi_endproc # -- End function .type _Z26matrixVectorMultiplicationPfS_S_i,@object # @_Z26matrixVectorMultiplicationPfS_S_i .section .rodata,"a",@progbits .globl _Z26matrixVectorMultiplicationPfS_S_i .p2align 3, 0x0 _Z26matrixVectorMultiplicationPfS_S_i: .quad _Z41__device_stub__matrixVectorMultiplicationPfS_S_i .size _Z26matrixVectorMultiplicationPfS_S_i, 8 .type .L.str.1,@object # @.str.1 .section .rodata.str1.1,"aMS",@progbits,1 .L.str.1: .asciz "\nGPU Time Elapsed: " .size .L.str.1, 21 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "Error : " .size .L.str.2, 9 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z26matrixVectorMultiplicationPfS_S_i" .size .L__unnamed_1, 38 .type __hip_fatbin_wrapper,@object # 
@__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "<<<<<<<<<< initial data:" .size .Lstr, 25 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z41__device_stub__matrixVectorMultiplicationPfS_S_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z26matrixVectorMultiplicationPfS_S_i .addrsig_sym _ZSt4cout .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include "includes.h" __global__ void gpu_histo_kernel_naive(u_char* Source, int *res, unsigned height, unsigned width){ int j = blockIdx.x*blockDim.x + threadIdx.x; int i = blockIdx.y*blockDim.y + threadIdx.y; if ((i<0)||(i>=height)||(j<0)||(j>=width)) {} else { u_char val = Source[i*width+j]; atomicAdd(&res[val],1); } }
code for sm_80 Function : _Z22gpu_histo_kernel_naivePhPijj .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e280000002500 */ /*0020*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */ /* 0x000e680000002600 */ /*0030*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */ /* 0x000e680000002200 */ /*0040*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */ /* 0x000e220000002100 */ /*0050*/ IMAD R3, R3, c[0x0][0x4], R2 ; /* 0x0000010003037a24 */ /* 0x002fc400078e0202 */ /*0060*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */ /* 0x001fc600078e0205 */ /*0070*/ ISETP.GE.U32.AND P0, PT, R3.reuse, c[0x0][0x170], PT ; /* 0x00005c0003007a0c */ /* 0x040fe40003f06070 */ /*0080*/ LOP3.LUT R2, R3, R0, RZ, 0xfc, !PT ; /* 0x0000000003027212 */ /* 0x000fc800078efcff */ /*0090*/ ISETP.LT.OR P0, PT, R2, RZ, P0 ; /* 0x000000ff0200720c */ /* 0x000fc80000701670 */ /*00a0*/ ISETP.GE.U32.OR P0, PT, R0, c[0x0][0x174], P0 ; /* 0x00005d0000007a0c */ /* 0x000fda0000706470 */ /*00b0*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*00c0*/ IMAD R0, R3, c[0x0][0x174], R0 ; /* 0x00005d0003007a24 */ /* 0x000fe200078e0200 */ /*00d0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc80000000a00 */ /*00e0*/ IADD3 R4, P0, R0, c[0x0][0x160], RZ ; /* 0x0000580000047a10 */ /* 0x000fca0007f1e0ff */ /*00f0*/ IMAD.X R5, RZ, RZ, c[0x0][0x164], P0 ; /* 0x00005900ff057624 */ /* 0x000fca00000e06ff */ /*0100*/ LDG.E.U8 R2, [R4.64] ; /* 0x0000000404027981 */ /* 0x000ea2000c1e1100 */ /*0110*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */ /* 0x000fe200000001ff */ /*0120*/ MOV R7, 0x1 ; /* 0x0000000100077802 */ /* 0x000fd20000000f00 */ /*0130*/ IMAD.WIDE.U32 R2, R2, R3, c[0x0][0x168] ; /* 0x00005a0002027625 */ /* 0x004fca00078e0003 */ /*0140*/ 
RED.E.ADD.STRONG.GPU [R2.64], R7 ; /* 0x000000070200798e */ /* 0x000fe2000c10e184 */ /*0150*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0160*/ BRA 0x160; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include "includes.h" __global__ void gpu_histo_kernel_naive(u_char* Source, int *res, unsigned height, unsigned width){ int j = blockIdx.x*blockDim.x + threadIdx.x; int i = blockIdx.y*blockDim.y + threadIdx.y; if ((i<0)||(i>=height)||(j<0)||(j>=width)) {} else { u_char val = Source[i*width+j]; atomicAdd(&res[val],1); } }
.file "tmpxft_0010991d_00000000-6_gpu_histo_kernel_naive.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z46__device_stub__Z22gpu_histo_kernel_naivePhPijjPhPijj .type _Z46__device_stub__Z22gpu_histo_kernel_naivePhPijjPhPijj, @function _Z46__device_stub__Z22gpu_histo_kernel_naivePhPijjPhPijj: .LFB2051: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z22gpu_histo_kernel_naivePhPijj(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z46__device_stub__Z22gpu_histo_kernel_naivePhPijjPhPijj, .-_Z46__device_stub__Z22gpu_histo_kernel_naivePhPijjPhPijj .globl _Z22gpu_histo_kernel_naivePhPijj .type _Z22gpu_histo_kernel_naivePhPijj, @function 
_Z22gpu_histo_kernel_naivePhPijj: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z46__device_stub__Z22gpu_histo_kernel_naivePhPijjPhPijj addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z22gpu_histo_kernel_naivePhPijj, .-_Z22gpu_histo_kernel_naivePhPijj .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "_Z22gpu_histo_kernel_naivePhPijj" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z22gpu_histo_kernel_naivePhPijj(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include "includes.h" __global__ void gpu_histo_kernel_naive(u_char* Source, int *res, unsigned height, unsigned width){ int j = blockIdx.x*blockDim.x + threadIdx.x; int i = blockIdx.y*blockDim.y + threadIdx.y; if ((i<0)||(i>=height)||(j<0)||(j>=width)) {} else { u_char val = Source[i*width+j]; atomicAdd(&res[val],1); } }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void gpu_histo_kernel_naive(u_char* Source, int *res, unsigned height, unsigned width){ int j = blockIdx.x*blockDim.x + threadIdx.x; int i = blockIdx.y*blockDim.y + threadIdx.y; if ((i<0)||(i>=height)||(j<0)||(j>=width)) {} else { u_char val = Source[i*width+j]; atomicAdd(&res[val],1); } }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include "includes.h" __global__ void gpu_histo_kernel_naive(u_char* Source, int *res, unsigned height, unsigned width){ int j = blockIdx.x*blockDim.x + threadIdx.x; int i = blockIdx.y*blockDim.y + threadIdx.y; if ((i<0)||(i>=height)||(j<0)||(j>=width)) {} else { u_char val = Source[i*width+j]; atomicAdd(&res[val],1); } }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z22gpu_histo_kernel_naivePhPijj .globl _Z22gpu_histo_kernel_naivePhPijj .p2align 8 .type _Z22gpu_histo_kernel_naivePhPijj,@function _Z22gpu_histo_kernel_naivePhPijj: s_load_b32 s4, s[0:1], 0x24 v_bfe_u32 v3, v0, 10, 10 s_add_u32 s2, s0, 24 s_addc_u32 s3, s1, 0 s_waitcnt lgkmcnt(0) s_lshr_b32 s4, s4, 16 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s4, v[3:4] s_mov_b32 s4, exec_lo v_cmpx_lt_i32_e32 -1, v1 s_cbranch_execz .LBB0_3 s_load_b32 s2, s[2:3], 0xc s_load_b64 s[4:5], s[0:1], 0x10 v_and_b32_e32 v0, 0x3ff, v0 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff v_cmp_gt_u32_e32 vcc_lo, s4, v1 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_mad_u64_u32 v[2:3], null, s14, s2, v[0:1] v_cmp_gt_u32_e64 s2, s5, v2 v_cmp_lt_i32_e64 s3, -1, v2 s_delay_alu instid0(VALU_DEP_2) s_and_b32 s2, vcc_lo, s2 s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) s_and_b32 s2, s2, s3 s_delay_alu instid0(SALU_CYCLE_1) s_and_b32 exec_lo, exec_lo, s2 s_cbranch_execz .LBB0_3 s_load_b128 s[0:3], s[0:1], 0x0 v_mad_u64_u32 v[3:4], null, v1, s5, v[2:3] v_mov_b32_e32 v1, 1 s_waitcnt lgkmcnt(0) global_load_u8 v0, v3, s[0:1] s_waitcnt vmcnt(0) v_lshlrev_b32_e32 v0, 2, v0 global_atomic_add_u32 v0, v1, s[2:3] .LBB0_3: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z22gpu_histo_kernel_naivePhPijj .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 
.amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 5 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z22gpu_histo_kernel_naivePhPijj, .Lfunc_end0-_Z22gpu_histo_kernel_naivePhPijj .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value - .offset: 20 .size: 4 .value_kind: by_value - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - 
.offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z22gpu_histo_kernel_naivePhPijj .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z22gpu_histo_kernel_naivePhPijj.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 5 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include "includes.h" __global__ void gpu_histo_kernel_naive(u_char* Source, int *res, unsigned height, unsigned width){ int j = blockIdx.x*blockDim.x + threadIdx.x; int i = blockIdx.y*blockDim.y + threadIdx.y; if ((i<0)||(i>=height)||(j<0)||(j>=width)) {} else { u_char val = Source[i*width+j]; atomicAdd(&res[val],1); } }
.text .file "gpu_histo_kernel_naive.hip" .globl _Z37__device_stub__gpu_histo_kernel_naivePhPijj # -- Begin function _Z37__device_stub__gpu_histo_kernel_naivePhPijj .p2align 4, 0x90 .type _Z37__device_stub__gpu_histo_kernel_naivePhPijj,@function _Z37__device_stub__gpu_histo_kernel_naivePhPijj: # @_Z37__device_stub__gpu_histo_kernel_naivePhPijj .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 8(%rsp), %rax movq %rax, 104(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z22gpu_histo_kernel_naivePhPijj, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z37__device_stub__gpu_histo_kernel_naivePhPijj, .Lfunc_end0-_Z37__device_stub__gpu_histo_kernel_naivePhPijj .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z22gpu_histo_kernel_naivePhPijj, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- 
End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z22gpu_histo_kernel_naivePhPijj,@object # @_Z22gpu_histo_kernel_naivePhPijj .section .rodata,"a",@progbits .globl _Z22gpu_histo_kernel_naivePhPijj .p2align 3, 0x0 _Z22gpu_histo_kernel_naivePhPijj: .quad _Z37__device_stub__gpu_histo_kernel_naivePhPijj .size _Z22gpu_histo_kernel_naivePhPijj, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z22gpu_histo_kernel_naivePhPijj" .size .L__unnamed_1, 33 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z37__device_stub__gpu_histo_kernel_naivePhPijj .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z22gpu_histo_kernel_naivePhPijj .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z22gpu_histo_kernel_naivePhPijj .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e280000002500 */ /*0020*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */ /* 0x000e680000002600 */ /*0030*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */ /* 0x000e680000002200 */ /*0040*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */ /* 0x000e220000002100 */ /*0050*/ IMAD R3, R3, c[0x0][0x4], R2 ; /* 0x0000010003037a24 */ /* 0x002fc400078e0202 */ /*0060*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */ /* 0x001fc600078e0205 */ /*0070*/ ISETP.GE.U32.AND P0, PT, R3.reuse, c[0x0][0x170], PT ; /* 0x00005c0003007a0c */ /* 0x040fe40003f06070 */ /*0080*/ LOP3.LUT R2, R3, R0, RZ, 0xfc, !PT ; /* 0x0000000003027212 */ /* 0x000fc800078efcff */ /*0090*/ ISETP.LT.OR P0, PT, R2, RZ, P0 ; /* 0x000000ff0200720c */ /* 0x000fc80000701670 */ /*00a0*/ ISETP.GE.U32.OR P0, PT, R0, c[0x0][0x174], P0 ; /* 0x00005d0000007a0c */ /* 0x000fda0000706470 */ /*00b0*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*00c0*/ IMAD R0, R3, c[0x0][0x174], R0 ; /* 0x00005d0003007a24 */ /* 0x000fe200078e0200 */ /*00d0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc80000000a00 */ /*00e0*/ IADD3 R4, P0, R0, c[0x0][0x160], RZ ; /* 0x0000580000047a10 */ /* 0x000fca0007f1e0ff */ /*00f0*/ IMAD.X R5, RZ, RZ, c[0x0][0x164], P0 ; /* 0x00005900ff057624 */ /* 0x000fca00000e06ff */ /*0100*/ LDG.E.U8 R2, [R4.64] ; /* 0x0000000404027981 */ /* 0x000ea2000c1e1100 */ /*0110*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */ /* 0x000fe200000001ff */ /*0120*/ MOV R7, 0x1 ; /* 0x0000000100077802 */ /* 0x000fd20000000f00 */ /*0130*/ IMAD.WIDE.U32 R2, R2, R3, c[0x0][0x168] ; /* 0x00005a0002027625 */ /* 0x004fca00078e0003 */ /*0140*/ 
RED.E.ADD.STRONG.GPU [R2.64], R7 ; /* 0x000000070200798e */ /* 0x000fe2000c10e184 */ /*0150*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0160*/ BRA 0x160; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z22gpu_histo_kernel_naivePhPijj .globl _Z22gpu_histo_kernel_naivePhPijj .p2align 8 .type _Z22gpu_histo_kernel_naivePhPijj,@function _Z22gpu_histo_kernel_naivePhPijj: s_load_b32 s4, s[0:1], 0x24 v_bfe_u32 v3, v0, 10, 10 s_add_u32 s2, s0, 24 s_addc_u32 s3, s1, 0 s_waitcnt lgkmcnt(0) s_lshr_b32 s4, s4, 16 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s4, v[3:4] s_mov_b32 s4, exec_lo v_cmpx_lt_i32_e32 -1, v1 s_cbranch_execz .LBB0_3 s_load_b32 s2, s[2:3], 0xc s_load_b64 s[4:5], s[0:1], 0x10 v_and_b32_e32 v0, 0x3ff, v0 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff v_cmp_gt_u32_e32 vcc_lo, s4, v1 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_mad_u64_u32 v[2:3], null, s14, s2, v[0:1] v_cmp_gt_u32_e64 s2, s5, v2 v_cmp_lt_i32_e64 s3, -1, v2 s_delay_alu instid0(VALU_DEP_2) s_and_b32 s2, vcc_lo, s2 s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) s_and_b32 s2, s2, s3 s_delay_alu instid0(SALU_CYCLE_1) s_and_b32 exec_lo, exec_lo, s2 s_cbranch_execz .LBB0_3 s_load_b128 s[0:3], s[0:1], 0x0 v_mad_u64_u32 v[3:4], null, v1, s5, v[2:3] v_mov_b32_e32 v1, 1 s_waitcnt lgkmcnt(0) global_load_u8 v0, v3, s[0:1] s_waitcnt vmcnt(0) v_lshlrev_b32_e32 v0, 2, v0 global_atomic_add_u32 v0, v1, s[2:3] .LBB0_3: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z22gpu_histo_kernel_naivePhPijj .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 
.amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 5 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z22gpu_histo_kernel_naivePhPijj, .Lfunc_end0-_Z22gpu_histo_kernel_naivePhPijj .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value - .offset: 20 .size: 4 .value_kind: by_value - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - 
.offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z22gpu_histo_kernel_naivePhPijj .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z22gpu_histo_kernel_naivePhPijj.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 5 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_0010991d_00000000-6_gpu_histo_kernel_naive.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z46__device_stub__Z22gpu_histo_kernel_naivePhPijjPhPijj .type _Z46__device_stub__Z22gpu_histo_kernel_naivePhPijjPhPijj, @function _Z46__device_stub__Z22gpu_histo_kernel_naivePhPijjPhPijj: .LFB2051: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z22gpu_histo_kernel_naivePhPijj(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z46__device_stub__Z22gpu_histo_kernel_naivePhPijjPhPijj, .-_Z46__device_stub__Z22gpu_histo_kernel_naivePhPijjPhPijj .globl _Z22gpu_histo_kernel_naivePhPijj .type _Z22gpu_histo_kernel_naivePhPijj, @function 
_Z22gpu_histo_kernel_naivePhPijj: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z46__device_stub__Z22gpu_histo_kernel_naivePhPijjPhPijj addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z22gpu_histo_kernel_naivePhPijj, .-_Z22gpu_histo_kernel_naivePhPijj .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "_Z22gpu_histo_kernel_naivePhPijj" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z22gpu_histo_kernel_naivePhPijj(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "gpu_histo_kernel_naive.hip" .globl _Z37__device_stub__gpu_histo_kernel_naivePhPijj # -- Begin function _Z37__device_stub__gpu_histo_kernel_naivePhPijj .p2align 4, 0x90 .type _Z37__device_stub__gpu_histo_kernel_naivePhPijj,@function _Z37__device_stub__gpu_histo_kernel_naivePhPijj: # @_Z37__device_stub__gpu_histo_kernel_naivePhPijj .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 8(%rsp), %rax movq %rax, 104(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z22gpu_histo_kernel_naivePhPijj, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z37__device_stub__gpu_histo_kernel_naivePhPijj, .Lfunc_end0-_Z37__device_stub__gpu_histo_kernel_naivePhPijj .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z22gpu_histo_kernel_naivePhPijj, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- 
End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z22gpu_histo_kernel_naivePhPijj,@object # @_Z22gpu_histo_kernel_naivePhPijj .section .rodata,"a",@progbits .globl _Z22gpu_histo_kernel_naivePhPijj .p2align 3, 0x0 _Z22gpu_histo_kernel_naivePhPijj: .quad _Z37__device_stub__gpu_histo_kernel_naivePhPijj .size _Z22gpu_histo_kernel_naivePhPijj, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z22gpu_histo_kernel_naivePhPijj" .size .L__unnamed_1, 33 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z37__device_stub__gpu_histo_kernel_naivePhPijj .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z22gpu_histo_kernel_naivePhPijj .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
// // findCutoff.cu // //This file contains the function that determines the most //efficient number of assemblies to perform on the gpu #include <cuda.h> #include <iostream> void cudaAssemble(double Zs[],double Xs[], int num, double nZs[], double nXs[], int odd, int newlen); void cudaDisassemble(double OldAF[], double Zs[], double Xs[],double nZs[], double nXs[], int odd, int morelen, int lesslen, double AF[]); void Assemble(double Zs[], double Xs[],double nZs[], double nXs[], int len, int odd, int n); void Disassemble(double lessZs[], double lessXs[],double moreZs[], double moreXs[], double oldAs[] ,double newAs[], int num, int odd); int findCutoff(int n, int accuracy) { //Variable declarations int x=n; int numAssemblies=0; int count=0; int *numbods; int odd; //Timer creation float time1; float time2; time1=0; time2=0; cudaEvent_t beginEvent; cudaEvent_t endEvent; cudaEventCreate( &beginEvent ); cudaEventCreate( &endEvent ); //Determine the number of assemblies needed to completely assemble n bodies while(x!=1) { if( x%2==0) { x=x/2; } else { x++; x=x/2; } numAssemblies++; } //Allocate space for a matrix that holds the number of bodies at each level of assembly numbods=(int*)malloc(sizeof(int)*numAssemblies); //Fill numbods x=n; while(count<numAssemblies) { numbods[count]=x; if(x%2==0) { x=x/2; } else { x++; x=x/2; } count++; } count=1; //Begin process of finding most efficient number of assemblies while((count<numAssemblies) && time1>=time2) //Compare time for gpu and cpu to complete assembly { //Create and allocate space for empty variables to mimic the dca algorithm at a level of assembly double* AF; double* Zs; double* Xs; double* nZs; double* nXs; double* AFo; AF=(double*)malloc(sizeof(double)*numbods[numAssemblies-1-count]*4*6); Zs=(double*)malloc(sizeof(double)*numbods[numAssemblies-1-count]*26*6); Xs=(double*)malloc(sizeof(double)*numbods[numAssemblies-1-count]*5*5); if(count==0) { nZs=(double*)malloc(sizeof(double)*26*6); 
nXs=(double*)malloc(sizeof(double)*25); AFo=(double*)malloc(sizeof(double)*6*4); } else { nZs=(double*)malloc(sizeof(double)*26*6*numbods[numAssemblies-count]); nXs=(double*)malloc(sizeof(double)*numbods[numAssemblies-count]*25); AFo=(double*)malloc(sizeof(double)*numbods[numAssemblies-count]*6*4); } //Check the parity of the number of bodies at the current level of assembly if(numbods[numAssemblies-1-count]%2==0) { odd=0; } else { odd=1; } //Check the gpu speed cudaEventRecord( beginEvent, 0 ); //Begin timer for(int i=0; i<accuracy; i++) //Perform the operations a set number of times to vary accuracy { //Test both assembly and disassembly at this level if(count==0) { cudaAssemble(Zs,Xs,numbods[numAssemblies-1-count],nZs,nXs,odd,1); cudaDisassemble(AFo,Zs,Xs,nZs,nXs,odd,numbods[numAssemblies-1-count],1,AF); } else { cudaAssemble(Zs,Xs,numbods[numAssemblies-1-count],nZs,nXs,odd,numbods[numAssemblies-count]); cudaDisassemble(AFo,Zs,Xs,nZs,nXs,odd,numbods[numAssemblies-1-count],numbods[numAssemblies-count],AF); } } //End timer cudaEventRecord( endEvent, 0 ); cudaEventSynchronize( endEvent ); cudaEventElapsedTime( &time1, beginEvent, endEvent ); //Check the cpu speed cudaEventRecord( beginEvent,0); //begin timing for(int i=0; i<accuracy; i++) //Perfom the operations a set number of times to vary accuracy { //Test both assembly and disassembly at this level if(count==0) { Assemble(Zs,Xs,nZs,nXs,1,odd,numbods[numAssemblies-1-count]); Disassemble(nZs,nXs,Zs,Xs,AFo,AF,numbods[numAssemblies-count],odd); } else { Assemble(Zs,Xs,nZs,nXs,numbods[numAssemblies-count],odd,numbods[numAssemblies-1-count]); Disassemble(nZs,nXs,Zs,Xs,AFo,AF,numbods[numAssemblies-count],odd); } } //End timer cudaEventRecord( endEvent,0); cudaEventSynchronize(endEvent); cudaEventElapsedTime( &time2,beginEvent, endEvent); count++; } return numAssemblies-count; }
code for sm_80
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
// // findCutoff.cu // //This file contains the function that determines the most //efficient number of assemblies to perform on the gpu #include <cuda.h> #include <iostream> void cudaAssemble(double Zs[],double Xs[], int num, double nZs[], double nXs[], int odd, int newlen); void cudaDisassemble(double OldAF[], double Zs[], double Xs[],double nZs[], double nXs[], int odd, int morelen, int lesslen, double AF[]); void Assemble(double Zs[], double Xs[],double nZs[], double nXs[], int len, int odd, int n); void Disassemble(double lessZs[], double lessXs[],double moreZs[], double moreXs[], double oldAs[] ,double newAs[], int num, int odd); int findCutoff(int n, int accuracy) { //Variable declarations int x=n; int numAssemblies=0; int count=0; int *numbods; int odd; //Timer creation float time1; float time2; time1=0; time2=0; cudaEvent_t beginEvent; cudaEvent_t endEvent; cudaEventCreate( &beginEvent ); cudaEventCreate( &endEvent ); //Determine the number of assemblies needed to completely assemble n bodies while(x!=1) { if( x%2==0) { x=x/2; } else { x++; x=x/2; } numAssemblies++; } //Allocate space for a matrix that holds the number of bodies at each level of assembly numbods=(int*)malloc(sizeof(int)*numAssemblies); //Fill numbods x=n; while(count<numAssemblies) { numbods[count]=x; if(x%2==0) { x=x/2; } else { x++; x=x/2; } count++; } count=1; //Begin process of finding most efficient number of assemblies while((count<numAssemblies) && time1>=time2) //Compare time for gpu and cpu to complete assembly { //Create and allocate space for empty variables to mimic the dca algorithm at a level of assembly double* AF; double* Zs; double* Xs; double* nZs; double* nXs; double* AFo; AF=(double*)malloc(sizeof(double)*numbods[numAssemblies-1-count]*4*6); Zs=(double*)malloc(sizeof(double)*numbods[numAssemblies-1-count]*26*6); Xs=(double*)malloc(sizeof(double)*numbods[numAssemblies-1-count]*5*5); if(count==0) { nZs=(double*)malloc(sizeof(double)*26*6); 
nXs=(double*)malloc(sizeof(double)*25); AFo=(double*)malloc(sizeof(double)*6*4); } else { nZs=(double*)malloc(sizeof(double)*26*6*numbods[numAssemblies-count]); nXs=(double*)malloc(sizeof(double)*numbods[numAssemblies-count]*25); AFo=(double*)malloc(sizeof(double)*numbods[numAssemblies-count]*6*4); } //Check the parity of the number of bodies at the current level of assembly if(numbods[numAssemblies-1-count]%2==0) { odd=0; } else { odd=1; } //Check the gpu speed cudaEventRecord( beginEvent, 0 ); //Begin timer for(int i=0; i<accuracy; i++) //Perform the operations a set number of times to vary accuracy { //Test both assembly and disassembly at this level if(count==0) { cudaAssemble(Zs,Xs,numbods[numAssemblies-1-count],nZs,nXs,odd,1); cudaDisassemble(AFo,Zs,Xs,nZs,nXs,odd,numbods[numAssemblies-1-count],1,AF); } else { cudaAssemble(Zs,Xs,numbods[numAssemblies-1-count],nZs,nXs,odd,numbods[numAssemblies-count]); cudaDisassemble(AFo,Zs,Xs,nZs,nXs,odd,numbods[numAssemblies-1-count],numbods[numAssemblies-count],AF); } } //End timer cudaEventRecord( endEvent, 0 ); cudaEventSynchronize( endEvent ); cudaEventElapsedTime( &time1, beginEvent, endEvent ); //Check the cpu speed cudaEventRecord( beginEvent,0); //begin timing for(int i=0; i<accuracy; i++) //Perfom the operations a set number of times to vary accuracy { //Test both assembly and disassembly at this level if(count==0) { Assemble(Zs,Xs,nZs,nXs,1,odd,numbods[numAssemblies-1-count]); Disassemble(nZs,nXs,Zs,Xs,AFo,AF,numbods[numAssemblies-count],odd); } else { Assemble(Zs,Xs,nZs,nXs,numbods[numAssemblies-count],odd,numbods[numAssemblies-1-count]); Disassemble(nZs,nXs,Zs,Xs,AFo,AF,numbods[numAssemblies-count],odd); } } //End timer cudaEventRecord( endEvent,0); cudaEventSynchronize(endEvent); cudaEventElapsedTime( &time2,beginEvent, endEvent); count++; } return numAssemblies-count; }
.file "tmpxft_0019e6bc_00000000-6_findCutoff.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB3672: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3672: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z10findCutoffii .type _Z10findCutoffii, @function _Z10findCutoffii: .LFB3669: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $136, %rsp .cfi_def_cfa_offset 192 movl %edi, %ebx movl %esi, 56(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax movl $0x00000000, 96(%rsp) movl $0x00000000, 100(%rsp) leaq 104(%rsp), %rdi call cudaEventCreate@PLT leaq 112(%rsp), %rdi call cudaEventCreate@PLT cmpl $1, %ebx je .L25 movl %ebx, %eax movl $0, %edx jmp .L7 .L5: addl $1, %eax movl %eax, %ecx shrl $31, %ecx addl %ecx, %eax sarl %eax .L6: leal 1(%rdx), %ecx cmpl $1, %eax je .L33 movl %ecx, %edx .L7: testb $1, %al jne .L5 movl %eax, %ecx shrl $31, %ecx addl %ecx, %eax sarl %eax jmp .L6 .L33: movl %eax, 32(%rsp) movl %ecx, %r15d movl %ecx, 88(%rsp) movl %edx, %r14d movl %edx, 92(%rsp) movslq %ecx, %rax salq $2, %rax movq %rax, 72(%rsp) movq %rax, %rdi call malloc@PLT movq %rax, %rdx testl %r15d, %r15d jle .L4 movl $0, %eax movl %r14d, %esi jmp .L10 .L8: addl $1, %ebx movl %ebx, %ecx shrl $31, %ecx addl %ecx, %ebx sarl %ebx .L9: addq $1, %rax leal -1(%rax), %ecx cmpl %ecx, %esi jle .L34 .L10: movl %ebx, (%rdx,%rax,4) testb $1, %bl jne .L8 movl %ebx, %ecx shrl $31, %ecx addl %ecx, %ebx sarl %ebx 
jmp .L9 .L34: cmpl $1, 88(%rsp) jle .L11 movq 72(%rsp), %rax leaq -4(%rdx,%rax), %rax movq %rax, 80(%rsp) movq %rdx, 64(%rsp) jmp .L12 .L39: movl $1248, %edi call malloc@PLT movq %rax, %r12 movl $200, %edi call malloc@PLT movq %rax, %r13 movl $192, %edi call malloc@PLT movq %rax, 24(%rsp) jmp .L14 .L36: subq $8, %rsp .cfi_def_cfa_offset 200 pushq $1 .cfi_def_cfa_offset 208 movl 52(%rsp), %r14d movl %r14d, %r9d movq %r13, %r8 movq %r12, %rcx movl %r15d, %edx movq %rbp, %rsi movq %rbx, %rdi call _Z12cudaAssemblePdS_iS_S_ii@PLT addq $8, %rsp .cfi_def_cfa_offset 200 pushq 24(%rsp) .cfi_def_cfa_offset 208 pushq $1 .cfi_def_cfa_offset 216 pushq %r15 .cfi_def_cfa_offset 224 movl %r14d, %r9d movq %r13, %r8 movq %r12, %rcx movq %rbp, %rdx movq %rbx, %rsi movq 56(%rsp), %rdi call _Z15cudaDisassemblePdS_S_S_S_iiiS_@PLT addq $32, %rsp .cfi_def_cfa_offset 192 .L17: movl 12(%rsp), %eax addl $1, %eax cmpl %eax, 56(%rsp) je .L35 movl %eax, 12(%rsp) .L18: cmpl $0, 32(%rsp) je .L36 movq 48(%rsp), %rax movl (%rax), %r14d subq $8, %rsp .cfi_def_cfa_offset 200 pushq %r14 .cfi_def_cfa_offset 208 movl 52(%rsp), %r9d movq %r13, %r8 movq %r12, %rcx movl %r15d, %edx movq %rbp, %rsi movq %rbx, %rdi call _Z12cudaAssemblePdS_iS_S_ii@PLT addq $8, %rsp .cfi_def_cfa_offset 200 pushq 24(%rsp) .cfi_def_cfa_offset 208 pushq %r14 .cfi_def_cfa_offset 216 pushq %r15 .cfi_def_cfa_offset 224 movl 68(%rsp), %r9d movq %r13, %r8 movq %r12, %rcx movq %rbp, %rdx movq %rbx, %rsi movq 56(%rsp), %rdi call _Z15cudaDisassemblePdS_S_S_S_iiiS_@PLT addq $32, %rsp .cfi_def_cfa_offset 192 jmp .L17 .L35: movl $0, %esi movq 112(%rsp), %rdi call cudaEventRecord@PLT movq 112(%rsp), %rdi call cudaEventSynchronize@PLT leaq 96(%rsp), %rdi movq 112(%rsp), %rdx movq 104(%rsp), %rsi call cudaEventElapsedTime@PLT movl $0, %esi movq 104(%rsp), %rdi call cudaEventRecord@PLT movl $0, %r14d movl %r15d, 60(%rsp) movq %rbx, 40(%rsp) movl 36(%rsp), %ebx jmp .L21 .L37: subq $8, %rsp .cfi_def_cfa_offset 200 movl 68(%rsp), %eax pushq %rax 
.cfi_def_cfa_offset 208 movl %ebx, %r9d movl $1, %r8d movq %r13, %rcx movq %r12, %rdx movq %rbp, %rsi movq 56(%rsp), %r15 movq %r15, %rdi call _Z8AssemblePdS_S_S_iii@PLT addq $16, %rsp .cfi_def_cfa_offset 192 pushq %rbx .cfi_def_cfa_offset 200 movq 72(%rsp), %rax movq 80(%rsp), %rcx movl (%rax,%rcx), %eax pushq %rax .cfi_def_cfa_offset 208 movq 32(%rsp), %r9 movq 40(%rsp), %r8 movq %rbp, %rcx movq %r15, %rdx movq %r13, %rsi movq %r12, %rdi call _Z11DisassemblePdS_S_S_S_S_ii@PLT addq $16, %rsp .cfi_def_cfa_offset 192 .L20: leal 1(%r14), %eax cmpl %r14d, 12(%rsp) je .L23 movl %eax, %r14d .L21: cmpl $0, 32(%rsp) je .L37 movq 48(%rsp), %rax movl (%rax), %r15d subq $8, %rsp .cfi_def_cfa_offset 200 movl 68(%rsp), %eax pushq %rax .cfi_def_cfa_offset 208 movl %ebx, %r9d movl %r15d, %r8d movq %r13, %rcx movq %r12, %rdx movq %rbp, %rsi movq 56(%rsp), %rdi call _Z8AssemblePdS_S_S_iii@PLT addq $16, %rsp .cfi_def_cfa_offset 192 pushq %rbx .cfi_def_cfa_offset 200 pushq %r15 .cfi_def_cfa_offset 208 movq 32(%rsp), %r9 movq 40(%rsp), %r8 movq %rbp, %rcx movq 56(%rsp), %rdx movq %r13, %rsi movq %r12, %rdi call _Z11DisassemblePdS_S_S_S_S_ii@PLT addq $16, %rsp .cfi_def_cfa_offset 192 jmp .L20 .L29: movl %eax, 32(%rsp) .L11: movl 88(%rsp), %eax movl 32(%rsp), %ebx subl %ebx, %eax movq 120(%rsp), %rdx subq %fs:40, %rdx jne .L38 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state movl $0, %esi movq 112(%rsp), %rdi call cudaEventRecord@PLT movq 112(%rsp), %rdi call cudaEventSynchronize@PLT leaq 96(%rsp), %rdi movq 112(%rsp), %rdx movq 104(%rsp), %rsi call cudaEventElapsedTime@PLT movl $0, %esi movq 104(%rsp), %rdi call cudaEventRecord@PLT .L23: movl $0, %esi movq 112(%rsp), %rdi call cudaEventRecord@PLT movq 112(%rsp), %rdi call cudaEventSynchronize@PLT leaq 
100(%rsp), %rdi movq 112(%rsp), %rdx movq 104(%rsp), %rsi call cudaEventElapsedTime@PLT movl 32(%rsp), %ecx leal 1(%rcx), %eax subq $4, 80(%rsp) cmpl %ecx, 92(%rsp) jle .L29 movl %eax, 32(%rsp) .L12: movss 96(%rsp), %xmm0 comiss 100(%rsp), %xmm0 jb .L11 movq 80(%rsp), %rax movl -4(%rax), %r15d movslq %r15d, %rbp leaq 0(%rbp,%rbp,2), %rdi salq $6, %rdi call malloc@PLT movq %rax, 16(%rsp) imulq $1248, %rbp, %rdi call malloc@PLT movq %rax, %rbx leaq 0(%rbp,%rbp,4), %rax leaq (%rax,%rax,4), %rdi salq $3, %rdi call malloc@PLT movq %rax, %rbp cmpl $0, 32(%rsp) je .L39 movl 88(%rsp), %eax movl 32(%rsp), %esi subl %esi, %eax cltq movq 64(%rsp), %rdx movslq (%rdx,%rax,4), %r14 imulq $1248, %r14, %rdi call malloc@PLT movq %rax, %r12 leaq (%r14,%r14,4), %rax leaq (%rax,%rax,4), %rdi salq $3, %rdi call malloc@PLT movq %rax, %r13 leaq (%r14,%r14,2), %rdi salq $6, %rdi call malloc@PLT movq %rax, 24(%rsp) .L14: movl %r15d, %eax andl $1, %eax movl %eax, 36(%rsp) movl $0, %esi movq 104(%rsp), %rdi call cudaEventRecord@PLT cmpl $0, 56(%rsp) jle .L15 movl 88(%rsp), %eax movl 32(%rsp), %ecx subl %ecx, %eax cltq movq 64(%rsp), %rdx leaq (%rdx,%rax,4), %rax movq %rax, 48(%rsp) movl $0, 12(%rsp) jmp .L18 .L25: movl $0, 88(%rsp) .L4: movl $1, 32(%rsp) jmp .L11 .L38: call __stack_chk_fail@PLT .cfi_endproc .LFE3669: .size _Z10findCutoffii, .-_Z10findCutoffii .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB3695: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3695: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type 
_ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
// // findCutoff.cu // //This file contains the function that determines the most //efficient number of assemblies to perform on the gpu #include <cuda.h> #include <iostream> void cudaAssemble(double Zs[],double Xs[], int num, double nZs[], double nXs[], int odd, int newlen); void cudaDisassemble(double OldAF[], double Zs[], double Xs[],double nZs[], double nXs[], int odd, int morelen, int lesslen, double AF[]); void Assemble(double Zs[], double Xs[],double nZs[], double nXs[], int len, int odd, int n); void Disassemble(double lessZs[], double lessXs[],double moreZs[], double moreXs[], double oldAs[] ,double newAs[], int num, int odd); int findCutoff(int n, int accuracy) { //Variable declarations int x=n; int numAssemblies=0; int count=0; int *numbods; int odd; //Timer creation float time1; float time2; time1=0; time2=0; cudaEvent_t beginEvent; cudaEvent_t endEvent; cudaEventCreate( &beginEvent ); cudaEventCreate( &endEvent ); //Determine the number of assemblies needed to completely assemble n bodies while(x!=1) { if( x%2==0) { x=x/2; } else { x++; x=x/2; } numAssemblies++; } //Allocate space for a matrix that holds the number of bodies at each level of assembly numbods=(int*)malloc(sizeof(int)*numAssemblies); //Fill numbods x=n; while(count<numAssemblies) { numbods[count]=x; if(x%2==0) { x=x/2; } else { x++; x=x/2; } count++; } count=1; //Begin process of finding most efficient number of assemblies while((count<numAssemblies) && time1>=time2) //Compare time for gpu and cpu to complete assembly { //Create and allocate space for empty variables to mimic the dca algorithm at a level of assembly double* AF; double* Zs; double* Xs; double* nZs; double* nXs; double* AFo; AF=(double*)malloc(sizeof(double)*numbods[numAssemblies-1-count]*4*6); Zs=(double*)malloc(sizeof(double)*numbods[numAssemblies-1-count]*26*6); Xs=(double*)malloc(sizeof(double)*numbods[numAssemblies-1-count]*5*5); if(count==0) { nZs=(double*)malloc(sizeof(double)*26*6); 
nXs=(double*)malloc(sizeof(double)*25); AFo=(double*)malloc(sizeof(double)*6*4); } else { nZs=(double*)malloc(sizeof(double)*26*6*numbods[numAssemblies-count]); nXs=(double*)malloc(sizeof(double)*numbods[numAssemblies-count]*25); AFo=(double*)malloc(sizeof(double)*numbods[numAssemblies-count]*6*4); } //Check the parity of the number of bodies at the current level of assembly if(numbods[numAssemblies-1-count]%2==0) { odd=0; } else { odd=1; } //Check the gpu speed cudaEventRecord( beginEvent, 0 ); //Begin timer for(int i=0; i<accuracy; i++) //Perform the operations a set number of times to vary accuracy { //Test both assembly and disassembly at this level if(count==0) { cudaAssemble(Zs,Xs,numbods[numAssemblies-1-count],nZs,nXs,odd,1); cudaDisassemble(AFo,Zs,Xs,nZs,nXs,odd,numbods[numAssemblies-1-count],1,AF); } else { cudaAssemble(Zs,Xs,numbods[numAssemblies-1-count],nZs,nXs,odd,numbods[numAssemblies-count]); cudaDisassemble(AFo,Zs,Xs,nZs,nXs,odd,numbods[numAssemblies-1-count],numbods[numAssemblies-count],AF); } } //End timer cudaEventRecord( endEvent, 0 ); cudaEventSynchronize( endEvent ); cudaEventElapsedTime( &time1, beginEvent, endEvent ); //Check the cpu speed cudaEventRecord( beginEvent,0); //begin timing for(int i=0; i<accuracy; i++) //Perfom the operations a set number of times to vary accuracy { //Test both assembly and disassembly at this level if(count==0) { Assemble(Zs,Xs,nZs,nXs,1,odd,numbods[numAssemblies-1-count]); Disassemble(nZs,nXs,Zs,Xs,AFo,AF,numbods[numAssemblies-count],odd); } else { Assemble(Zs,Xs,nZs,nXs,numbods[numAssemblies-count],odd,numbods[numAssemblies-1-count]); Disassemble(nZs,nXs,Zs,Xs,AFo,AF,numbods[numAssemblies-count],odd); } } //End timer cudaEventRecord( endEvent,0); cudaEventSynchronize(endEvent); cudaEventElapsedTime( &time2,beginEvent, endEvent); count++; } return numAssemblies-count; }
// // findCutoff.cu // //This file contains the function that determines the most //efficient number of assemblies to perform on the gpu #include <hip/hip_runtime.h> #include <iostream> void cudaAssemble(double Zs[],double Xs[], int num, double nZs[], double nXs[], int odd, int newlen); void cudaDisassemble(double OldAF[], double Zs[], double Xs[],double nZs[], double nXs[], int odd, int morelen, int lesslen, double AF[]); void Assemble(double Zs[], double Xs[],double nZs[], double nXs[], int len, int odd, int n); void Disassemble(double lessZs[], double lessXs[],double moreZs[], double moreXs[], double oldAs[] ,double newAs[], int num, int odd); int findCutoff(int n, int accuracy) { //Variable declarations int x=n; int numAssemblies=0; int count=0; int *numbods; int odd; //Timer creation float time1; float time2; time1=0; time2=0; hipEvent_t beginEvent; hipEvent_t endEvent; hipEventCreate( &beginEvent ); hipEventCreate( &endEvent ); //Determine the number of assemblies needed to completely assemble n bodies while(x!=1) { if( x%2==0) { x=x/2; } else { x++; x=x/2; } numAssemblies++; } //Allocate space for a matrix that holds the number of bodies at each level of assembly numbods=(int*)malloc(sizeof(int)*numAssemblies); //Fill numbods x=n; while(count<numAssemblies) { numbods[count]=x; if(x%2==0) { x=x/2; } else { x++; x=x/2; } count++; } count=1; //Begin process of finding most efficient number of assemblies while((count<numAssemblies) && time1>=time2) //Compare time for gpu and cpu to complete assembly { //Create and allocate space for empty variables to mimic the dca algorithm at a level of assembly double* AF; double* Zs; double* Xs; double* nZs; double* nXs; double* AFo; AF=(double*)malloc(sizeof(double)*numbods[numAssemblies-1-count]*4*6); Zs=(double*)malloc(sizeof(double)*numbods[numAssemblies-1-count]*26*6); Xs=(double*)malloc(sizeof(double)*numbods[numAssemblies-1-count]*5*5); if(count==0) { nZs=(double*)malloc(sizeof(double)*26*6); 
nXs=(double*)malloc(sizeof(double)*25); AFo=(double*)malloc(sizeof(double)*6*4); } else { nZs=(double*)malloc(sizeof(double)*26*6*numbods[numAssemblies-count]); nXs=(double*)malloc(sizeof(double)*numbods[numAssemblies-count]*25); AFo=(double*)malloc(sizeof(double)*numbods[numAssemblies-count]*6*4); } //Check the parity of the number of bodies at the current level of assembly if(numbods[numAssemblies-1-count]%2==0) { odd=0; } else { odd=1; } //Check the gpu speed hipEventRecord( beginEvent, 0 ); //Begin timer for(int i=0; i<accuracy; i++) //Perform the operations a set number of times to vary accuracy { //Test both assembly and disassembly at this level if(count==0) { cudaAssemble(Zs,Xs,numbods[numAssemblies-1-count],nZs,nXs,odd,1); cudaDisassemble(AFo,Zs,Xs,nZs,nXs,odd,numbods[numAssemblies-1-count],1,AF); } else { cudaAssemble(Zs,Xs,numbods[numAssemblies-1-count],nZs,nXs,odd,numbods[numAssemblies-count]); cudaDisassemble(AFo,Zs,Xs,nZs,nXs,odd,numbods[numAssemblies-1-count],numbods[numAssemblies-count],AF); } } //End timer hipEventRecord( endEvent, 0 ); hipEventSynchronize( endEvent ); hipEventElapsedTime( &time1, beginEvent, endEvent ); //Check the cpu speed hipEventRecord( beginEvent,0); //begin timing for(int i=0; i<accuracy; i++) //Perfom the operations a set number of times to vary accuracy { //Test both assembly and disassembly at this level if(count==0) { Assemble(Zs,Xs,nZs,nXs,1,odd,numbods[numAssemblies-1-count]); Disassemble(nZs,nXs,Zs,Xs,AFo,AF,numbods[numAssemblies-count],odd); } else { Assemble(Zs,Xs,nZs,nXs,numbods[numAssemblies-count],odd,numbods[numAssemblies-1-count]); Disassemble(nZs,nXs,Zs,Xs,AFo,AF,numbods[numAssemblies-count],odd); } } //End timer hipEventRecord( endEvent,0); hipEventSynchronize(endEvent); hipEventElapsedTime( &time2,beginEvent, endEvent); count++; } return numAssemblies-count; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
// // findCutoff.cu // //This file contains the function that determines the most //efficient number of assemblies to perform on the gpu #include <hip/hip_runtime.h> #include <iostream> void cudaAssemble(double Zs[],double Xs[], int num, double nZs[], double nXs[], int odd, int newlen); void cudaDisassemble(double OldAF[], double Zs[], double Xs[],double nZs[], double nXs[], int odd, int morelen, int lesslen, double AF[]); void Assemble(double Zs[], double Xs[],double nZs[], double nXs[], int len, int odd, int n); void Disassemble(double lessZs[], double lessXs[],double moreZs[], double moreXs[], double oldAs[] ,double newAs[], int num, int odd); int findCutoff(int n, int accuracy) { //Variable declarations int x=n; int numAssemblies=0; int count=0; int *numbods; int odd; //Timer creation float time1; float time2; time1=0; time2=0; hipEvent_t beginEvent; hipEvent_t endEvent; hipEventCreate( &beginEvent ); hipEventCreate( &endEvent ); //Determine the number of assemblies needed to completely assemble n bodies while(x!=1) { if( x%2==0) { x=x/2; } else { x++; x=x/2; } numAssemblies++; } //Allocate space for a matrix that holds the number of bodies at each level of assembly numbods=(int*)malloc(sizeof(int)*numAssemblies); //Fill numbods x=n; while(count<numAssemblies) { numbods[count]=x; if(x%2==0) { x=x/2; } else { x++; x=x/2; } count++; } count=1; //Begin process of finding most efficient number of assemblies while((count<numAssemblies) && time1>=time2) //Compare time for gpu and cpu to complete assembly { //Create and allocate space for empty variables to mimic the dca algorithm at a level of assembly double* AF; double* Zs; double* Xs; double* nZs; double* nXs; double* AFo; AF=(double*)malloc(sizeof(double)*numbods[numAssemblies-1-count]*4*6); Zs=(double*)malloc(sizeof(double)*numbods[numAssemblies-1-count]*26*6); Xs=(double*)malloc(sizeof(double)*numbods[numAssemblies-1-count]*5*5); if(count==0) { nZs=(double*)malloc(sizeof(double)*26*6); 
nXs=(double*)malloc(sizeof(double)*25); AFo=(double*)malloc(sizeof(double)*6*4); } else { nZs=(double*)malloc(sizeof(double)*26*6*numbods[numAssemblies-count]); nXs=(double*)malloc(sizeof(double)*numbods[numAssemblies-count]*25); AFo=(double*)malloc(sizeof(double)*numbods[numAssemblies-count]*6*4); } //Check the parity of the number of bodies at the current level of assembly if(numbods[numAssemblies-1-count]%2==0) { odd=0; } else { odd=1; } //Check the gpu speed hipEventRecord( beginEvent, 0 ); //Begin timer for(int i=0; i<accuracy; i++) //Perform the operations a set number of times to vary accuracy { //Test both assembly and disassembly at this level if(count==0) { cudaAssemble(Zs,Xs,numbods[numAssemblies-1-count],nZs,nXs,odd,1); cudaDisassemble(AFo,Zs,Xs,nZs,nXs,odd,numbods[numAssemblies-1-count],1,AF); } else { cudaAssemble(Zs,Xs,numbods[numAssemblies-1-count],nZs,nXs,odd,numbods[numAssemblies-count]); cudaDisassemble(AFo,Zs,Xs,nZs,nXs,odd,numbods[numAssemblies-1-count],numbods[numAssemblies-count],AF); } } //End timer hipEventRecord( endEvent, 0 ); hipEventSynchronize( endEvent ); hipEventElapsedTime( &time1, beginEvent, endEvent ); //Check the cpu speed hipEventRecord( beginEvent,0); //begin timing for(int i=0; i<accuracy; i++) //Perfom the operations a set number of times to vary accuracy { //Test both assembly and disassembly at this level if(count==0) { Assemble(Zs,Xs,nZs,nXs,1,odd,numbods[numAssemblies-1-count]); Disassemble(nZs,nXs,Zs,Xs,AFo,AF,numbods[numAssemblies-count],odd); } else { Assemble(Zs,Xs,nZs,nXs,numbods[numAssemblies-count],odd,numbods[numAssemblies-1-count]); Disassemble(nZs,nXs,Zs,Xs,AFo,AF,numbods[numAssemblies-count],odd); } } //End timer hipEventRecord( endEvent,0); hipEventSynchronize(endEvent); hipEventElapsedTime( &time2,beginEvent, endEvent); count++; } return numAssemblies-count; }
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
// // findCutoff.cu // //This file contains the function that determines the most //efficient number of assemblies to perform on the gpu #include <hip/hip_runtime.h> #include <iostream> void cudaAssemble(double Zs[],double Xs[], int num, double nZs[], double nXs[], int odd, int newlen); void cudaDisassemble(double OldAF[], double Zs[], double Xs[],double nZs[], double nXs[], int odd, int morelen, int lesslen, double AF[]); void Assemble(double Zs[], double Xs[],double nZs[], double nXs[], int len, int odd, int n); void Disassemble(double lessZs[], double lessXs[],double moreZs[], double moreXs[], double oldAs[] ,double newAs[], int num, int odd); int findCutoff(int n, int accuracy) { //Variable declarations int x=n; int numAssemblies=0; int count=0; int *numbods; int odd; //Timer creation float time1; float time2; time1=0; time2=0; hipEvent_t beginEvent; hipEvent_t endEvent; hipEventCreate( &beginEvent ); hipEventCreate( &endEvent ); //Determine the number of assemblies needed to completely assemble n bodies while(x!=1) { if( x%2==0) { x=x/2; } else { x++; x=x/2; } numAssemblies++; } //Allocate space for a matrix that holds the number of bodies at each level of assembly numbods=(int*)malloc(sizeof(int)*numAssemblies); //Fill numbods x=n; while(count<numAssemblies) { numbods[count]=x; if(x%2==0) { x=x/2; } else { x++; x=x/2; } count++; } count=1; //Begin process of finding most efficient number of assemblies while((count<numAssemblies) && time1>=time2) //Compare time for gpu and cpu to complete assembly { //Create and allocate space for empty variables to mimic the dca algorithm at a level of assembly double* AF; double* Zs; double* Xs; double* nZs; double* nXs; double* AFo; AF=(double*)malloc(sizeof(double)*numbods[numAssemblies-1-count]*4*6); Zs=(double*)malloc(sizeof(double)*numbods[numAssemblies-1-count]*26*6); Xs=(double*)malloc(sizeof(double)*numbods[numAssemblies-1-count]*5*5); if(count==0) { nZs=(double*)malloc(sizeof(double)*26*6); 
nXs=(double*)malloc(sizeof(double)*25); AFo=(double*)malloc(sizeof(double)*6*4); } else { nZs=(double*)malloc(sizeof(double)*26*6*numbods[numAssemblies-count]); nXs=(double*)malloc(sizeof(double)*numbods[numAssemblies-count]*25); AFo=(double*)malloc(sizeof(double)*numbods[numAssemblies-count]*6*4); } //Check the parity of the number of bodies at the current level of assembly if(numbods[numAssemblies-1-count]%2==0) { odd=0; } else { odd=1; } //Check the gpu speed hipEventRecord( beginEvent, 0 ); //Begin timer for(int i=0; i<accuracy; i++) //Perform the operations a set number of times to vary accuracy { //Test both assembly and disassembly at this level if(count==0) { cudaAssemble(Zs,Xs,numbods[numAssemblies-1-count],nZs,nXs,odd,1); cudaDisassemble(AFo,Zs,Xs,nZs,nXs,odd,numbods[numAssemblies-1-count],1,AF); } else { cudaAssemble(Zs,Xs,numbods[numAssemblies-1-count],nZs,nXs,odd,numbods[numAssemblies-count]); cudaDisassemble(AFo,Zs,Xs,nZs,nXs,odd,numbods[numAssemblies-1-count],numbods[numAssemblies-count],AF); } } //End timer hipEventRecord( endEvent, 0 ); hipEventSynchronize( endEvent ); hipEventElapsedTime( &time1, beginEvent, endEvent ); //Check the cpu speed hipEventRecord( beginEvent,0); //begin timing for(int i=0; i<accuracy; i++) //Perfom the operations a set number of times to vary accuracy { //Test both assembly and disassembly at this level if(count==0) { Assemble(Zs,Xs,nZs,nXs,1,odd,numbods[numAssemblies-1-count]); Disassemble(nZs,nXs,Zs,Xs,AFo,AF,numbods[numAssemblies-count],odd); } else { Assemble(Zs,Xs,nZs,nXs,numbods[numAssemblies-count],odd,numbods[numAssemblies-1-count]); Disassemble(nZs,nXs,Zs,Xs,AFo,AF,numbods[numAssemblies-count],odd); } } //End timer hipEventRecord( endEvent,0); hipEventSynchronize(endEvent); hipEventElapsedTime( &time2,beginEvent, endEvent); count++; } return numAssemblies-count; }
.text .file "findCutoff.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .globl _Z10findCutoffii # -- Begin function _Z10findCutoffii .p2align 4, 0x90 .type _Z10findCutoffii,@function _Z10findCutoffii: # @_Z10findCutoffii .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $152, %rsp .cfi_def_cfa_offset 208 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl %esi, 28(%rsp) # 4-byte Spill movl %edi, %ebx movl $0, 12(%rsp) movl $0, 8(%rsp) leaq 48(%rsp), %rdi callq hipEventCreate leaq 16(%rsp), %rdi callq hipEventCreate xorl %ebp, %ebp cmpl $1, %ebx je .LBB0_3 # %bb.1: # %.lr.ph.preheader xorl %ebp, %ebp movl %ebx, %eax .p2align 4, 0x90 .LBB0_2: # %.lr.ph # =>This Inner Loop Header: Depth=1 movl %eax, %ecx andl $1, %ecx addl %eax, %ecx movl %ecx, %eax sarl %eax incl %ebp cmpl $2, %ecx jne .LBB0_2 .LBB0_3: # %._crit_edge movl %ebp, %r14d leaq (,%r14,4), %rdi callq malloc movq %rax, %r15 testl %ebp, %ebp je .LBB0_6 # %bb.4: # %.lr.ph150.preheader xorl %eax, %eax movl %ebx, %ecx .p2align 4, 0x90 .LBB0_5: # %.lr.ph150 # =>This Inner Loop Header: Depth=1 movl %ebx, (%r15,%rax,4) andl $1, %ecx addl %ebx, %ecx sarl %ecx incq %rax movl %ecx, %ebx cmpq %rax, %r14 jne .LBB0_5 .LBB0_6: # %.preheader movl $1, %ebx cmpl $2, %ebp jb .LBB0_13 # %bb.7: # %.preheader movss 12(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero ucomiss 8(%rsp), %xmm0 jb .LBB0_13 # %bb.8: # %.lr.ph160 movslq %ebp, %rax movq %rax, 120(%rsp) # 8-byte Spill movl $1, %ebx movl %ebp, 60(%rsp) # 4-byte Spill movq %r14, 136(%rsp) # 8-byte Spill movq %r15, 128(%rsp) # 8-byte Spill .p2align 4, 0x90 .LBB0_9: # =>This Loop Header: Depth=1 # Child Loop BB0_14 Depth 2 # Child Loop BB0_15 Depth 2 
movl %ebx, %eax notl %eax addl %ebp, %eax cltq movslq (%r15,%rax,4), %r13 movq %r13, %rax shlq $6, %rax leaq (%rax,%rax,2), %rdi callq malloc movq %rax, 64(%rsp) # 8-byte Spill imulq $1248, %r13, %rdi # imm = 0x4E0 callq malloc movq %rax, 104(%rsp) # 8-byte Spill imulq $200, %r13, %rdi callq malloc movq %rax, 96(%rsp) # 8-byte Spill movq 120(%rsp), %rax # 8-byte Reload movq %rbx, 144(%rsp) # 8-byte Spill subq %rbx, %rax movslq (%r15,%rax,4), %rbx movq %rbx, 40(%rsp) # 8-byte Spill imulq $1248, %rbx, %rdi # imm = 0x4E0 callq malloc movq %rax, 88(%rsp) # 8-byte Spill imulq $200, %rbx, %rdi callq malloc movq %rax, 80(%rsp) # 8-byte Spill movq %rbx, %rax shlq $6, %rax leaq (%rax,%rax,2), %rdi callq malloc movq %rax, 72(%rsp) # 8-byte Spill movq %r13, 112(%rsp) # 8-byte Spill # kill: def $r13d killed $r13d killed $r13 def $r13 andl $1, %r13d movq %r13, 32(%rsp) # 8-byte Spill movq 48(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movl 28(%rsp), %eax # 4-byte Reload movl %eax, %r12d testl %eax, %eax jle .LBB0_10 .p2align 4, 0x90 .LBB0_14: # %.lr.ph153 # Parent Loop BB0_9 Depth=1 # => This Inner Loop Header: Depth=2 subq $8, %rsp .cfi_adjust_cfa_offset 8 movq 112(%rsp), %rbx # 8-byte Reload movq %rbx, %rdi movq 104(%rsp), %rbp # 8-byte Reload movq %rbp, %rsi movq 120(%rsp), %r14 # 8-byte Reload movl %r14d, %edx movq 96(%rsp), %r13 # 8-byte Reload movq %r13, %rcx movq 88(%rsp), %r15 # 8-byte Reload movq %r15, %r8 movq 40(%rsp), %r9 # 8-byte Reload # kill: def $r9d killed $r9d killed $r9 pushq 48(%rsp) # 8-byte Folded Reload .cfi_adjust_cfa_offset 8 callq _Z12cudaAssemblePdS_iS_S_ii addq $8, %rsp .cfi_adjust_cfa_offset -8 movq 80(%rsp), %rdi # 8-byte Reload movq %rbx, %rsi movq %rbp, %rdx movq %r13, %rcx movq %r15, %r8 movq 40(%rsp), %r9 # 8-byte Reload # kill: def $r9d killed $r9d killed $r9 pushq 72(%rsp) # 8-byte Folded Reload .cfi_adjust_cfa_offset 8 pushq 56(%rsp) # 8-byte Folded Reload .cfi_adjust_cfa_offset 8 pushq %r14 .cfi_adjust_cfa_offset 8 callq 
_Z15cudaDisassemblePdS_S_S_S_iiiS_ addq $32, %rsp .cfi_adjust_cfa_offset -32 decl %r12d jne .LBB0_14 .LBB0_10: # %._crit_edge154 # in Loop: Header=BB0_9 Depth=1 movq 16(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq 16(%rsp), %rdi callq hipEventSynchronize movq 48(%rsp), %rsi movq 16(%rsp), %rdx leaq 12(%rsp), %rdi callq hipEventElapsedTime movq 48(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movl 28(%rsp), %eax # 4-byte Reload movl %eax, %r12d testl %eax, %eax movq 64(%rsp), %r14 # 8-byte Reload jle .LBB0_11 .p2align 4, 0x90 .LBB0_15: # %.lr.ph157 # Parent Loop BB0_9 Depth=1 # => This Inner Loop Header: Depth=2 subq $8, %rsp .cfi_adjust_cfa_offset 8 movq 112(%rsp), %rbx # 8-byte Reload movq %rbx, %rdi movq 104(%rsp), %rbp # 8-byte Reload movq %rbp, %rsi movq 96(%rsp), %r13 # 8-byte Reload movq %r13, %rdx movq 88(%rsp), %r15 # 8-byte Reload movq %r15, %rcx movq 48(%rsp), %r8 # 8-byte Reload # kill: def $r8d killed $r8d killed $r8 movq 40(%rsp), %r9 # 8-byte Reload # kill: def $r9d killed $r9d killed $r9 pushq 120(%rsp) # 8-byte Folded Reload .cfi_adjust_cfa_offset 8 callq _Z8AssemblePdS_S_S_iii addq $16, %rsp .cfi_adjust_cfa_offset -16 movq %r13, %rdi movq %r15, %rsi movq %rbx, %rdx movq %rbp, %rcx movq 72(%rsp), %r8 # 8-byte Reload movq %r14, %r9 pushq 32(%rsp) # 8-byte Folded Reload .cfi_adjust_cfa_offset 8 pushq 48(%rsp) # 8-byte Folded Reload .cfi_adjust_cfa_offset 8 callq _Z11DisassemblePdS_S_S_S_S_ii addq $16, %rsp .cfi_adjust_cfa_offset -16 decl %r12d jne .LBB0_15 .LBB0_11: # %._crit_edge158 # in Loop: Header=BB0_9 Depth=1 movq 16(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq 16(%rsp), %rdi callq hipEventSynchronize movq 48(%rsp), %rsi movq 16(%rsp), %rdx leaq 8(%rsp), %rdi callq hipEventElapsedTime movq 144(%rsp), %rbx # 8-byte Reload incq %rbx movq 136(%rsp), %r14 # 8-byte Reload cmpq %r14, %rbx movl 60(%rsp), %ebp # 4-byte Reload movq 128(%rsp), %r15 # 8-byte Reload jae .LBB0_13 # %bb.12: # %._crit_edge158 # in Loop: Header=BB0_9 
Depth=1 movss 12(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero ucomiss 8(%rsp), %xmm0 jae .LBB0_9 .LBB0_13: # %._crit_edge161 subl %ebx, %ebp movl %ebp, %eax addq $152, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end0: .size _Z10findCutoffii, .Lfunc_end0-_Z10findCutoffii .cfi_endproc # -- End function .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_0019e6bc_00000000-6_findCutoff.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB3672: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3672: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z10findCutoffii .type _Z10findCutoffii, @function _Z10findCutoffii: .LFB3669: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $136, %rsp .cfi_def_cfa_offset 192 movl %edi, %ebx movl %esi, 56(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax movl $0x00000000, 96(%rsp) movl $0x00000000, 100(%rsp) leaq 104(%rsp), %rdi call cudaEventCreate@PLT leaq 112(%rsp), %rdi call cudaEventCreate@PLT cmpl $1, %ebx je .L25 movl %ebx, %eax movl $0, %edx jmp .L7 .L5: addl $1, %eax movl %eax, %ecx shrl $31, %ecx addl %ecx, %eax sarl %eax .L6: leal 1(%rdx), %ecx cmpl $1, %eax je .L33 movl %ecx, %edx .L7: testb $1, %al jne .L5 movl %eax, %ecx shrl $31, %ecx addl %ecx, %eax sarl %eax jmp .L6 .L33: movl %eax, 32(%rsp) movl %ecx, %r15d movl %ecx, 88(%rsp) movl %edx, %r14d movl %edx, 92(%rsp) movslq %ecx, %rax salq $2, %rax movq %rax, 72(%rsp) movq %rax, %rdi call malloc@PLT movq %rax, %rdx testl %r15d, %r15d jle .L4 movl $0, %eax movl %r14d, %esi jmp .L10 .L8: addl $1, %ebx movl %ebx, %ecx shrl $31, %ecx addl %ecx, %ebx sarl %ebx .L9: addq $1, %rax leal -1(%rax), %ecx cmpl %ecx, %esi jle .L34 .L10: movl %ebx, (%rdx,%rax,4) testb $1, %bl jne .L8 movl %ebx, %ecx shrl $31, %ecx addl %ecx, %ebx sarl %ebx 
jmp .L9 .L34: cmpl $1, 88(%rsp) jle .L11 movq 72(%rsp), %rax leaq -4(%rdx,%rax), %rax movq %rax, 80(%rsp) movq %rdx, 64(%rsp) jmp .L12 .L39: movl $1248, %edi call malloc@PLT movq %rax, %r12 movl $200, %edi call malloc@PLT movq %rax, %r13 movl $192, %edi call malloc@PLT movq %rax, 24(%rsp) jmp .L14 .L36: subq $8, %rsp .cfi_def_cfa_offset 200 pushq $1 .cfi_def_cfa_offset 208 movl 52(%rsp), %r14d movl %r14d, %r9d movq %r13, %r8 movq %r12, %rcx movl %r15d, %edx movq %rbp, %rsi movq %rbx, %rdi call _Z12cudaAssemblePdS_iS_S_ii@PLT addq $8, %rsp .cfi_def_cfa_offset 200 pushq 24(%rsp) .cfi_def_cfa_offset 208 pushq $1 .cfi_def_cfa_offset 216 pushq %r15 .cfi_def_cfa_offset 224 movl %r14d, %r9d movq %r13, %r8 movq %r12, %rcx movq %rbp, %rdx movq %rbx, %rsi movq 56(%rsp), %rdi call _Z15cudaDisassemblePdS_S_S_S_iiiS_@PLT addq $32, %rsp .cfi_def_cfa_offset 192 .L17: movl 12(%rsp), %eax addl $1, %eax cmpl %eax, 56(%rsp) je .L35 movl %eax, 12(%rsp) .L18: cmpl $0, 32(%rsp) je .L36 movq 48(%rsp), %rax movl (%rax), %r14d subq $8, %rsp .cfi_def_cfa_offset 200 pushq %r14 .cfi_def_cfa_offset 208 movl 52(%rsp), %r9d movq %r13, %r8 movq %r12, %rcx movl %r15d, %edx movq %rbp, %rsi movq %rbx, %rdi call _Z12cudaAssemblePdS_iS_S_ii@PLT addq $8, %rsp .cfi_def_cfa_offset 200 pushq 24(%rsp) .cfi_def_cfa_offset 208 pushq %r14 .cfi_def_cfa_offset 216 pushq %r15 .cfi_def_cfa_offset 224 movl 68(%rsp), %r9d movq %r13, %r8 movq %r12, %rcx movq %rbp, %rdx movq %rbx, %rsi movq 56(%rsp), %rdi call _Z15cudaDisassemblePdS_S_S_S_iiiS_@PLT addq $32, %rsp .cfi_def_cfa_offset 192 jmp .L17 .L35: movl $0, %esi movq 112(%rsp), %rdi call cudaEventRecord@PLT movq 112(%rsp), %rdi call cudaEventSynchronize@PLT leaq 96(%rsp), %rdi movq 112(%rsp), %rdx movq 104(%rsp), %rsi call cudaEventElapsedTime@PLT movl $0, %esi movq 104(%rsp), %rdi call cudaEventRecord@PLT movl $0, %r14d movl %r15d, 60(%rsp) movq %rbx, 40(%rsp) movl 36(%rsp), %ebx jmp .L21 .L37: subq $8, %rsp .cfi_def_cfa_offset 200 movl 68(%rsp), %eax pushq %rax 
.cfi_def_cfa_offset 208 movl %ebx, %r9d movl $1, %r8d movq %r13, %rcx movq %r12, %rdx movq %rbp, %rsi movq 56(%rsp), %r15 movq %r15, %rdi call _Z8AssemblePdS_S_S_iii@PLT addq $16, %rsp .cfi_def_cfa_offset 192 pushq %rbx .cfi_def_cfa_offset 200 movq 72(%rsp), %rax movq 80(%rsp), %rcx movl (%rax,%rcx), %eax pushq %rax .cfi_def_cfa_offset 208 movq 32(%rsp), %r9 movq 40(%rsp), %r8 movq %rbp, %rcx movq %r15, %rdx movq %r13, %rsi movq %r12, %rdi call _Z11DisassemblePdS_S_S_S_S_ii@PLT addq $16, %rsp .cfi_def_cfa_offset 192 .L20: leal 1(%r14), %eax cmpl %r14d, 12(%rsp) je .L23 movl %eax, %r14d .L21: cmpl $0, 32(%rsp) je .L37 movq 48(%rsp), %rax movl (%rax), %r15d subq $8, %rsp .cfi_def_cfa_offset 200 movl 68(%rsp), %eax pushq %rax .cfi_def_cfa_offset 208 movl %ebx, %r9d movl %r15d, %r8d movq %r13, %rcx movq %r12, %rdx movq %rbp, %rsi movq 56(%rsp), %rdi call _Z8AssemblePdS_S_S_iii@PLT addq $16, %rsp .cfi_def_cfa_offset 192 pushq %rbx .cfi_def_cfa_offset 200 pushq %r15 .cfi_def_cfa_offset 208 movq 32(%rsp), %r9 movq 40(%rsp), %r8 movq %rbp, %rcx movq 56(%rsp), %rdx movq %r13, %rsi movq %r12, %rdi call _Z11DisassemblePdS_S_S_S_S_ii@PLT addq $16, %rsp .cfi_def_cfa_offset 192 jmp .L20 .L29: movl %eax, 32(%rsp) .L11: movl 88(%rsp), %eax movl 32(%rsp), %ebx subl %ebx, %eax movq 120(%rsp), %rdx subq %fs:40, %rdx jne .L38 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state movl $0, %esi movq 112(%rsp), %rdi call cudaEventRecord@PLT movq 112(%rsp), %rdi call cudaEventSynchronize@PLT leaq 96(%rsp), %rdi movq 112(%rsp), %rdx movq 104(%rsp), %rsi call cudaEventElapsedTime@PLT movl $0, %esi movq 104(%rsp), %rdi call cudaEventRecord@PLT .L23: movl $0, %esi movq 112(%rsp), %rdi call cudaEventRecord@PLT movq 112(%rsp), %rdi call cudaEventSynchronize@PLT leaq 
100(%rsp), %rdi movq 112(%rsp), %rdx movq 104(%rsp), %rsi call cudaEventElapsedTime@PLT movl 32(%rsp), %ecx leal 1(%rcx), %eax subq $4, 80(%rsp) cmpl %ecx, 92(%rsp) jle .L29 movl %eax, 32(%rsp) .L12: movss 96(%rsp), %xmm0 comiss 100(%rsp), %xmm0 jb .L11 movq 80(%rsp), %rax movl -4(%rax), %r15d movslq %r15d, %rbp leaq 0(%rbp,%rbp,2), %rdi salq $6, %rdi call malloc@PLT movq %rax, 16(%rsp) imulq $1248, %rbp, %rdi call malloc@PLT movq %rax, %rbx leaq 0(%rbp,%rbp,4), %rax leaq (%rax,%rax,4), %rdi salq $3, %rdi call malloc@PLT movq %rax, %rbp cmpl $0, 32(%rsp) je .L39 movl 88(%rsp), %eax movl 32(%rsp), %esi subl %esi, %eax cltq movq 64(%rsp), %rdx movslq (%rdx,%rax,4), %r14 imulq $1248, %r14, %rdi call malloc@PLT movq %rax, %r12 leaq (%r14,%r14,4), %rax leaq (%rax,%rax,4), %rdi salq $3, %rdi call malloc@PLT movq %rax, %r13 leaq (%r14,%r14,2), %rdi salq $6, %rdi call malloc@PLT movq %rax, 24(%rsp) .L14: movl %r15d, %eax andl $1, %eax movl %eax, 36(%rsp) movl $0, %esi movq 104(%rsp), %rdi call cudaEventRecord@PLT cmpl $0, 56(%rsp) jle .L15 movl 88(%rsp), %eax movl 32(%rsp), %ecx subl %ecx, %eax cltq movq 64(%rsp), %rdx leaq (%rdx,%rax,4), %rax movq %rax, 48(%rsp) movl $0, 12(%rsp) jmp .L18 .L25: movl $0, 88(%rsp) .L4: movl $1, 32(%rsp) jmp .L11 .L38: call __stack_chk_fail@PLT .cfi_endproc .LFE3669: .size _Z10findCutoffii, .-_Z10findCutoffii .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB3695: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3695: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type 
_ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "findCutoff.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .globl _Z10findCutoffii # -- Begin function _Z10findCutoffii .p2align 4, 0x90 .type _Z10findCutoffii,@function _Z10findCutoffii: # @_Z10findCutoffii .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $152, %rsp .cfi_def_cfa_offset 208 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl %esi, 28(%rsp) # 4-byte Spill movl %edi, %ebx movl $0, 12(%rsp) movl $0, 8(%rsp) leaq 48(%rsp), %rdi callq hipEventCreate leaq 16(%rsp), %rdi callq hipEventCreate xorl %ebp, %ebp cmpl $1, %ebx je .LBB0_3 # %bb.1: # %.lr.ph.preheader xorl %ebp, %ebp movl %ebx, %eax .p2align 4, 0x90 .LBB0_2: # %.lr.ph # =>This Inner Loop Header: Depth=1 movl %eax, %ecx andl $1, %ecx addl %eax, %ecx movl %ecx, %eax sarl %eax incl %ebp cmpl $2, %ecx jne .LBB0_2 .LBB0_3: # %._crit_edge movl %ebp, %r14d leaq (,%r14,4), %rdi callq malloc movq %rax, %r15 testl %ebp, %ebp je .LBB0_6 # %bb.4: # %.lr.ph150.preheader xorl %eax, %eax movl %ebx, %ecx .p2align 4, 0x90 .LBB0_5: # %.lr.ph150 # =>This Inner Loop Header: Depth=1 movl %ebx, (%r15,%rax,4) andl $1, %ecx addl %ebx, %ecx sarl %ecx incq %rax movl %ecx, %ebx cmpq %rax, %r14 jne .LBB0_5 .LBB0_6: # %.preheader movl $1, %ebx cmpl $2, %ebp jb .LBB0_13 # %bb.7: # %.preheader movss 12(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero ucomiss 8(%rsp), %xmm0 jb .LBB0_13 # %bb.8: # %.lr.ph160 movslq %ebp, %rax movq %rax, 120(%rsp) # 8-byte Spill movl $1, %ebx movl %ebp, 60(%rsp) # 4-byte Spill movq %r14, 136(%rsp) # 8-byte Spill movq %r15, 128(%rsp) # 8-byte Spill .p2align 4, 0x90 .LBB0_9: # =>This Loop Header: Depth=1 # Child Loop BB0_14 Depth 2 # Child Loop BB0_15 Depth 2 
movl %ebx, %eax notl %eax addl %ebp, %eax cltq movslq (%r15,%rax,4), %r13 movq %r13, %rax shlq $6, %rax leaq (%rax,%rax,2), %rdi callq malloc movq %rax, 64(%rsp) # 8-byte Spill imulq $1248, %r13, %rdi # imm = 0x4E0 callq malloc movq %rax, 104(%rsp) # 8-byte Spill imulq $200, %r13, %rdi callq malloc movq %rax, 96(%rsp) # 8-byte Spill movq 120(%rsp), %rax # 8-byte Reload movq %rbx, 144(%rsp) # 8-byte Spill subq %rbx, %rax movslq (%r15,%rax,4), %rbx movq %rbx, 40(%rsp) # 8-byte Spill imulq $1248, %rbx, %rdi # imm = 0x4E0 callq malloc movq %rax, 88(%rsp) # 8-byte Spill imulq $200, %rbx, %rdi callq malloc movq %rax, 80(%rsp) # 8-byte Spill movq %rbx, %rax shlq $6, %rax leaq (%rax,%rax,2), %rdi callq malloc movq %rax, 72(%rsp) # 8-byte Spill movq %r13, 112(%rsp) # 8-byte Spill # kill: def $r13d killed $r13d killed $r13 def $r13 andl $1, %r13d movq %r13, 32(%rsp) # 8-byte Spill movq 48(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movl 28(%rsp), %eax # 4-byte Reload movl %eax, %r12d testl %eax, %eax jle .LBB0_10 .p2align 4, 0x90 .LBB0_14: # %.lr.ph153 # Parent Loop BB0_9 Depth=1 # => This Inner Loop Header: Depth=2 subq $8, %rsp .cfi_adjust_cfa_offset 8 movq 112(%rsp), %rbx # 8-byte Reload movq %rbx, %rdi movq 104(%rsp), %rbp # 8-byte Reload movq %rbp, %rsi movq 120(%rsp), %r14 # 8-byte Reload movl %r14d, %edx movq 96(%rsp), %r13 # 8-byte Reload movq %r13, %rcx movq 88(%rsp), %r15 # 8-byte Reload movq %r15, %r8 movq 40(%rsp), %r9 # 8-byte Reload # kill: def $r9d killed $r9d killed $r9 pushq 48(%rsp) # 8-byte Folded Reload .cfi_adjust_cfa_offset 8 callq _Z12cudaAssemblePdS_iS_S_ii addq $8, %rsp .cfi_adjust_cfa_offset -8 movq 80(%rsp), %rdi # 8-byte Reload movq %rbx, %rsi movq %rbp, %rdx movq %r13, %rcx movq %r15, %r8 movq 40(%rsp), %r9 # 8-byte Reload # kill: def $r9d killed $r9d killed $r9 pushq 72(%rsp) # 8-byte Folded Reload .cfi_adjust_cfa_offset 8 pushq 56(%rsp) # 8-byte Folded Reload .cfi_adjust_cfa_offset 8 pushq %r14 .cfi_adjust_cfa_offset 8 callq 
_Z15cudaDisassemblePdS_S_S_S_iiiS_ addq $32, %rsp .cfi_adjust_cfa_offset -32 decl %r12d jne .LBB0_14 .LBB0_10: # %._crit_edge154 # in Loop: Header=BB0_9 Depth=1 movq 16(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq 16(%rsp), %rdi callq hipEventSynchronize movq 48(%rsp), %rsi movq 16(%rsp), %rdx leaq 12(%rsp), %rdi callq hipEventElapsedTime movq 48(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movl 28(%rsp), %eax # 4-byte Reload movl %eax, %r12d testl %eax, %eax movq 64(%rsp), %r14 # 8-byte Reload jle .LBB0_11 .p2align 4, 0x90 .LBB0_15: # %.lr.ph157 # Parent Loop BB0_9 Depth=1 # => This Inner Loop Header: Depth=2 subq $8, %rsp .cfi_adjust_cfa_offset 8 movq 112(%rsp), %rbx # 8-byte Reload movq %rbx, %rdi movq 104(%rsp), %rbp # 8-byte Reload movq %rbp, %rsi movq 96(%rsp), %r13 # 8-byte Reload movq %r13, %rdx movq 88(%rsp), %r15 # 8-byte Reload movq %r15, %rcx movq 48(%rsp), %r8 # 8-byte Reload # kill: def $r8d killed $r8d killed $r8 movq 40(%rsp), %r9 # 8-byte Reload # kill: def $r9d killed $r9d killed $r9 pushq 120(%rsp) # 8-byte Folded Reload .cfi_adjust_cfa_offset 8 callq _Z8AssemblePdS_S_S_iii addq $16, %rsp .cfi_adjust_cfa_offset -16 movq %r13, %rdi movq %r15, %rsi movq %rbx, %rdx movq %rbp, %rcx movq 72(%rsp), %r8 # 8-byte Reload movq %r14, %r9 pushq 32(%rsp) # 8-byte Folded Reload .cfi_adjust_cfa_offset 8 pushq 48(%rsp) # 8-byte Folded Reload .cfi_adjust_cfa_offset 8 callq _Z11DisassemblePdS_S_S_S_S_ii addq $16, %rsp .cfi_adjust_cfa_offset -16 decl %r12d jne .LBB0_15 .LBB0_11: # %._crit_edge158 # in Loop: Header=BB0_9 Depth=1 movq 16(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq 16(%rsp), %rdi callq hipEventSynchronize movq 48(%rsp), %rsi movq 16(%rsp), %rdx leaq 8(%rsp), %rdi callq hipEventElapsedTime movq 144(%rsp), %rbx # 8-byte Reload incq %rbx movq 136(%rsp), %r14 # 8-byte Reload cmpq %r14, %rbx movl 60(%rsp), %ebp # 4-byte Reload movq 128(%rsp), %r15 # 8-byte Reload jae .LBB0_13 # %bb.12: # %._crit_edge158 # in Loop: Header=BB0_9 
Depth=1 movss 12(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero ucomiss 8(%rsp), %xmm0 jae .LBB0_9 .LBB0_13: # %._crit_edge161 subl %ebx, %ebp movl %ebp, %eax addq $152, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end0: .size _Z10findCutoffii, .Lfunc_end0-_Z10findCutoffii .cfi_endproc # -- End function .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <iostream>
#include <cassert>
#include <time.h>

// CUDA kernel: element-wise vector addition, c[i] = a[i] + b[i].
// Expects a 1-D launch; each thread handles at most one element.
__global__ void vector_add(int *a, int *b, int *c, int n)
{
    // Flat global thread id.
    int tid = blockIdx.x * blockDim.x + threadIdx.x;

    // Guard the grid tail: the last block may have threads past n.
    if (tid < n)
        c[tid] = a[tid] + b[tid];
}

// Host-side check that every element of c is the sum of a and b.
// Note: assert() compiles away under NDEBUG; fine for this demo.
void verify_results(int *a, int *b, int *c, int n)
{
    for (int i = 0; i < n; i++)
    {
        assert(c[i] == a[i] + b[i]);
    }
}

int main()
{
    // Performing operations for 65536 numbers.
    int n = 1 << 16;

    // Host and device vector pointers.
    int *h_a, *h_b, *h_c;
    int *d_a, *d_b, *d_c;

    // Bytes per vector.
    size_t bytes = n * sizeof(int);

    // Allocate host memory.
    h_a = (int *) malloc(bytes);
    h_b = (int *) malloc(bytes);
    h_c = (int *) malloc(bytes);

    // Allocate device memory.
    cudaMalloc(&d_a, bytes);
    cudaMalloc(&d_b, bytes);
    cudaMalloc(&d_c, bytes);

    // Initialize inputs with pseudo-random numbers.
    for (int i = 0; i < n; i++)
    {
        h_a[i] = rand();
        h_b[i] = rand();
    }

    // Copy inputs host -> device.
    cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);

    // Launch configuration: integer ceil-division avoids the float
    // round-trip of ceil(float(n)/num_threads) and is exact for all n.
    int num_threads = 1024;
    int num_blocks = (n + num_threads - 1) / num_threads;

    // Start timing.
    clock_t start = clock();

    // Launch kernel on GPU.
    vector_add<<<num_blocks, num_threads>>>(d_a, d_b, d_c, n);

    // BUG FIX: kernel launches are asynchronous, so without a sync the
    // end timestamp only measured launch overhead, not GPU execution.
    // cudaDeviceSynchronize also surfaces any in-kernel error here.
    assert(cudaGetLastError() == cudaSuccess);      // catch bad launch config
    cudaDeviceSynchronize();                        // wait for the kernel

    // End timing.
    clock_t end = clock();

    // Copy result device -> host (blocking, so h_c is ready after this).
    cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);

    // Verify results against the CPU reference.
    verify_results(h_a, h_b, h_c, n);

    // Free device memory.
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    // Free host memory.
    free(h_a);
    free(h_b);
    free(h_c);

    double time_taken = double(end - start) / CLOCKS_PER_SEC;
    std::cout << "Time Taken on GPU: " << time_taken << std::endl;
    std::cout << "Completed Successfully" << std::endl;

    return 0;
}
code for sm_80 Function : _Z10vector_addPiS_S_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */ /* 0x000e280000002500 */ /*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */ /* 0x001fca00078e0203 */ /*0040*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x178], PT ; /* 0x00005e0006007a0c */ /* 0x000fda0003f06270 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */ /* 0x000fe200000001ff */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd20000000a00 */ /*0080*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */ /* 0x000fc800078e0207 */ /*0090*/ IMAD.WIDE R2, R6.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580006027625 */ /* 0x0c0fe400078e0207 */ /*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea8000c1e1900 */ /*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea2000c1e1900 */ /*00c0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */ /* 0x000fe200078e0207 */ /*00d0*/ IADD3 R9, R4, R3, RZ ; /* 0x0000000304097210 */ /* 0x004fca0007ffe0ff */ /*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x000fe2000c101904 */ /*00f0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0100*/ BRA 0x100; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ 
/*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <iostream> #include <cassert> #include <time.h> //Initializing CUDA kernel //Called from CPU, runs in GPU __global__ void vector_add(int *a, int *b, int *c, int n) { //calculating globad tid int tid = blockIdx.x * blockDim.x + threadIdx.x; //checking if the tid is not out of bounds if(tid<n) c[tid] = a[tid] + b[tid]; } void verify_results(int *a, int *b, int *c, int n) { //Asserting that the results calculated are correct for(int i=0; i<n; i++) { assert(c[i] == a[i] + b[i]); } } int main() { //Performing operations for 65536 numbers int n = 1<<16; //Pointers for CPU vectors int *h_a, *h_b, *h_c; //Pointers for GPU vectors int *d_a, *d_b, *d_c; //Calculate memory needed for each vector size_t bytes = n*sizeof(int); //Allocate calculated memory on CPU or host h_a = (int *) malloc(bytes); h_b = (int *) malloc(bytes); h_c = (int *) malloc(bytes); //Allocate memory on GPU cudaMalloc(&d_a, bytes); cudaMalloc(&d_b, bytes); cudaMalloc(&d_c, bytes); //Initializing arrays with random numbers for (int i=0; i<n; i++) { h_a[i] = rand(); h_b[i] = rand(); } //Copying arrays from CPU to GPU cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice); //No. of threads per block int num_threads = 1024; //No. of Thread Blocks int num_blocks = (int) ceil(float(n) / num_threads); //Starting time to calculate time taken on GPU clock_t start = clock(); //Launch kernel on GPU vector_add<<<num_blocks, num_threads>>>(d_a, d_b, d_c, n); //Recording end time clock_t end = clock(); //Copying results from GPU to CPU cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost); //Verifying results verify_results(h_a, h_b, h_c, n); //Free CUDA Memory cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); //Free CPU Memory free(h_a); free(h_b); free(h_c); double time_taken = double(end - start) / CLOCKS_PER_SEC; std::cout << "Time Taken on GPU: " << time_taken << std::endl; std::cout << "Completed Successfully" << std::endl; return 0; }
.file "tmpxft_00002588_00000000-6_vector_add.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB3673: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3673: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z14verify_resultsPiS_S_i .type _Z14verify_resultsPiS_S_i, @function _Z14verify_resultsPiS_S_i: .LFB3669: .cfi_startproc endbr64 testl %ecx, %ecx jle .L3 movl $0, %eax .L5: addl $1, %eax cmpl %eax, %ecx jne .L5 .L3: ret .cfi_endproc .LFE3669: .size _Z14verify_resultsPiS_S_i, .-_Z14verify_resultsPiS_S_i .globl _Z35__device_stub__Z10vector_addPiS_S_iPiS_S_i .type _Z35__device_stub__Z10vector_addPiS_S_iPiS_S_i, @function _Z35__device_stub__Z10vector_addPiS_S_iPiS_S_i: .LFB3695: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L11 .L7: movq 136(%rsp), %rax subq %fs:40, %rax jne .L12 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L11: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z10vector_addPiS_S_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 
jmp .L7 .L12: call __stack_chk_fail@PLT .cfi_endproc .LFE3695: .size _Z35__device_stub__Z10vector_addPiS_S_iPiS_S_i, .-_Z35__device_stub__Z10vector_addPiS_S_iPiS_S_i .globl _Z10vector_addPiS_S_i .type _Z10vector_addPiS_S_i, @function _Z10vector_addPiS_S_i: .LFB3696: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z35__device_stub__Z10vector_addPiS_S_iPiS_S_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3696: .size _Z10vector_addPiS_S_i, .-_Z10vector_addPiS_S_i .section .rodata.str1.1,"aMS",@progbits,1 .LC1: .string "Time Taken on GPU: " .LC2: .string "Completed Successfully" .text .globl main .type main, @function main: .LFB3670: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 subq $64, %rsp .cfi_def_cfa_offset 112 movq %fs:40, %rax movq %rax, 56(%rsp) xorl %eax, %eax movl $262144, %edi call malloc@PLT movq %rax, %r12 movl $262144, %edi call malloc@PLT movq %rax, %rbp movl $262144, %edi call malloc@PLT movq %rax, %r13 leaq 8(%rsp), %rdi movl $262144, %esi call cudaMalloc@PLT leaq 16(%rsp), %rdi movl $262144, %esi call cudaMalloc@PLT leaq 24(%rsp), %rdi movl $262144, %esi call cudaMalloc@PLT movl $0, %ebx .L16: call rand@PLT movl %eax, (%r12,%rbx) call rand@PLT movl %eax, 0(%rbp,%rbx) addq $4, %rbx cmpq $262144, %rbx jne .L16 movl $1, %ecx movl $262144, %edx movq %r12, %rsi movq 8(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movl $262144, %edx movq %rbp, %rsi movq 16(%rsp), %rdi call cudaMemcpy@PLT call clock@PLT movq %rax, %r14 movl $1024, 44(%rsp) movl $1, 48(%rsp) movl $64, 32(%rsp) movl $1, 36(%rsp) movl $0, %r9d movl $0, %r8d movq 44(%rsp), %rdx movl $1, %ecx movq 32(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L21 .L17: call clock@PLT movq %rax, %rbx 
movl $2, %ecx movl $262144, %edx movq 24(%rsp), %rsi movq %r13, %rdi call cudaMemcpy@PLT movq 8(%rsp), %rdi call cudaFree@PLT movq 16(%rsp), %rdi call cudaFree@PLT movq 24(%rsp), %rdi call cudaFree@PLT movq %r12, %rdi call free@PLT movq %rbp, %rdi call free@PLT movq %r13, %rdi call free@PLT subq %r14, %rbx pxor %xmm0, %xmm0 cvtsi2sdq %rbx, %xmm0 divsd .LC0(%rip), %xmm0 movq %xmm0, %rbx leaq .LC1(%rip), %rsi leaq _ZSt4cout(%rip), %rbp movq %rbp, %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi movq %rbx, %xmm0 call _ZNSo9_M_insertIdEERSoT_@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT leaq .LC2(%rip), %rsi movq %rbp, %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT movq 56(%rsp), %rax subq %fs:40, %rax jne .L22 movl $0, %eax addq $64, %rsp .cfi_remember_state .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .L21: .cfi_restore_state movl $65536, %ecx movq 24(%rsp), %rdx movq 16(%rsp), %rsi movq 8(%rsp), %rdi call _Z35__device_stub__Z10vector_addPiS_S_iPiS_S_i jmp .L17 .L22: call __stack_chk_fail@PLT .cfi_endproc .LFE3670: .size main, .-main .section .rodata.str1.1 .LC3: .string "_Z10vector_addPiS_S_i" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB3698: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC3(%rip), %rdx movq %rdx, %rcx leaq _Z10vector_addPiS_S_i(%rip), %rsi call __cudaRegisterFunction@PLT addq 
$32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3698: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC0: .long 0 .long 1093567616 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <iostream> #include <cassert> #include <time.h> //Initializing CUDA kernel //Called from CPU, runs in GPU __global__ void vector_add(int *a, int *b, int *c, int n) { //calculating globad tid int tid = blockIdx.x * blockDim.x + threadIdx.x; //checking if the tid is not out of bounds if(tid<n) c[tid] = a[tid] + b[tid]; } void verify_results(int *a, int *b, int *c, int n) { //Asserting that the results calculated are correct for(int i=0; i<n; i++) { assert(c[i] == a[i] + b[i]); } } int main() { //Performing operations for 65536 numbers int n = 1<<16; //Pointers for CPU vectors int *h_a, *h_b, *h_c; //Pointers for GPU vectors int *d_a, *d_b, *d_c; //Calculate memory needed for each vector size_t bytes = n*sizeof(int); //Allocate calculated memory on CPU or host h_a = (int *) malloc(bytes); h_b = (int *) malloc(bytes); h_c = (int *) malloc(bytes); //Allocate memory on GPU cudaMalloc(&d_a, bytes); cudaMalloc(&d_b, bytes); cudaMalloc(&d_c, bytes); //Initializing arrays with random numbers for (int i=0; i<n; i++) { h_a[i] = rand(); h_b[i] = rand(); } //Copying arrays from CPU to GPU cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice); //No. of threads per block int num_threads = 1024; //No. of Thread Blocks int num_blocks = (int) ceil(float(n) / num_threads); //Starting time to calculate time taken on GPU clock_t start = clock(); //Launch kernel on GPU vector_add<<<num_blocks, num_threads>>>(d_a, d_b, d_c, n); //Recording end time clock_t end = clock(); //Copying results from GPU to CPU cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost); //Verifying results verify_results(h_a, h_b, h_c, n); //Free CUDA Memory cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); //Free CPU Memory free(h_a); free(h_b); free(h_c); double time_taken = double(end - start) / CLOCKS_PER_SEC; std::cout << "Time Taken on GPU: " << time_taken << std::endl; std::cout << "Completed Successfully" << std::endl; return 0; }
#include <hip/hip_runtime.h> #include <iostream> #include <cassert> #include <time.h> //Initializing CUDA kernel //Called from CPU, runs in GPU __global__ void vector_add(int *a, int *b, int *c, int n) { //calculating globad tid int tid = blockIdx.x * blockDim.x + threadIdx.x; //checking if the tid is not out of bounds if(tid<n) c[tid] = a[tid] + b[tid]; } void verify_results(int *a, int *b, int *c, int n) { //Asserting that the results calculated are correct for(int i=0; i<n; i++) { assert(c[i] == a[i] + b[i]); } } int main() { //Performing operations for 65536 numbers int n = 1<<16; //Pointers for CPU vectors int *h_a, *h_b, *h_c; //Pointers for GPU vectors int *d_a, *d_b, *d_c; //Calculate memory needed for each vector size_t bytes = n*sizeof(int); //Allocate calculated memory on CPU or host h_a = (int *) malloc(bytes); h_b = (int *) malloc(bytes); h_c = (int *) malloc(bytes); //Allocate memory on GPU hipMalloc(&d_a, bytes); hipMalloc(&d_b, bytes); hipMalloc(&d_c, bytes); //Initializing arrays with random numbers for (int i=0; i<n; i++) { h_a[i] = rand(); h_b[i] = rand(); } //Copying arrays from CPU to GPU hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice); hipMemcpy(d_b, h_b, bytes, hipMemcpyHostToDevice); //No. of threads per block int num_threads = 1024; //No. of Thread Blocks int num_blocks = (int) ceil(float(n) / num_threads); //Starting time to calculate time taken on GPU clock_t start = clock(); //Launch kernel on GPU vector_add<<<num_blocks, num_threads>>>(d_a, d_b, d_c, n); //Recording end time clock_t end = clock(); //Copying results from GPU to CPU hipMemcpy(h_c, d_c, bytes, hipMemcpyDeviceToHost); //Verifying results verify_results(h_a, h_b, h_c, n); //Free CUDA Memory hipFree(d_a); hipFree(d_b); hipFree(d_c); //Free CPU Memory free(h_a); free(h_b); free(h_c); double time_taken = double(end - start) / CLOCKS_PER_SEC; std::cout << "Time Taken on GPU: " << time_taken << std::endl; std::cout << "Completed Successfully" << std::endl; return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include <iostream> #include <cassert> #include <time.h> //Initializing CUDA kernel //Called from CPU, runs in GPU __global__ void vector_add(int *a, int *b, int *c, int n) { //calculating globad tid int tid = blockIdx.x * blockDim.x + threadIdx.x; //checking if the tid is not out of bounds if(tid<n) c[tid] = a[tid] + b[tid]; } void verify_results(int *a, int *b, int *c, int n) { //Asserting that the results calculated are correct for(int i=0; i<n; i++) { assert(c[i] == a[i] + b[i]); } } int main() { //Performing operations for 65536 numbers int n = 1<<16; //Pointers for CPU vectors int *h_a, *h_b, *h_c; //Pointers for GPU vectors int *d_a, *d_b, *d_c; //Calculate memory needed for each vector size_t bytes = n*sizeof(int); //Allocate calculated memory on CPU or host h_a = (int *) malloc(bytes); h_b = (int *) malloc(bytes); h_c = (int *) malloc(bytes); //Allocate memory on GPU hipMalloc(&d_a, bytes); hipMalloc(&d_b, bytes); hipMalloc(&d_c, bytes); //Initializing arrays with random numbers for (int i=0; i<n; i++) { h_a[i] = rand(); h_b[i] = rand(); } //Copying arrays from CPU to GPU hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice); hipMemcpy(d_b, h_b, bytes, hipMemcpyHostToDevice); //No. of threads per block int num_threads = 1024; //No. of Thread Blocks int num_blocks = (int) ceil(float(n) / num_threads); //Starting time to calculate time taken on GPU clock_t start = clock(); //Launch kernel on GPU vector_add<<<num_blocks, num_threads>>>(d_a, d_b, d_c, n); //Recording end time clock_t end = clock(); //Copying results from GPU to CPU hipMemcpy(h_c, d_c, bytes, hipMemcpyDeviceToHost); //Verifying results verify_results(h_a, h_b, h_c, n); //Free CUDA Memory hipFree(d_a); hipFree(d_b); hipFree(d_c); //Free CPU Memory free(h_a); free(h_b); free(h_c); double time_taken = double(end - start) / CLOCKS_PER_SEC; std::cout << "Time Taken on GPU: " << time_taken << std::endl; std::cout << "Completed Successfully" << std::endl; return 0; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z10vector_addPiS_S_i .globl _Z10vector_addPiS_S_i .p2align 8 .type _Z10vector_addPiS_S_i,@function _Z10vector_addPiS_S_i: s_clause 0x1 s_load_b32 s2, s[0:1], 0x2c s_load_b32 s3, s[0:1], 0x18 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_i32_e64 s3, v1 s_cbranch_execz .LBB0_2 s_load_b128 s[4:7], s[0:1], 0x0 v_ashrrev_i32_e32 v2, 31, v1 s_load_b64 s[0:1], s[0:1], 0x10 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[1:2] s_waitcnt lgkmcnt(0) v_add_co_u32 v2, vcc_lo, s4, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo v_add_co_u32 v4, vcc_lo, s6, v0 v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s0, v0 global_load_b32 v2, v[2:3], off global_load_b32 v3, v[4:5], off v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo s_waitcnt vmcnt(0) v_add_nc_u32_e32 v2, v3, v2 global_store_b32 v[0:1], v2, off .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z10vector_addPiS_S_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 
.amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z10vector_addPiS_S_i, .Lfunc_end0-_Z10vector_addPiS_S_i .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 
.size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z10vector_addPiS_S_i .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z10vector_addPiS_S_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include <iostream> #include <cassert> #include <time.h> //Initializing CUDA kernel //Called from CPU, runs in GPU __global__ void vector_add(int *a, int *b, int *c, int n) { //calculating globad tid int tid = blockIdx.x * blockDim.x + threadIdx.x; //checking if the tid is not out of bounds if(tid<n) c[tid] = a[tid] + b[tid]; } void verify_results(int *a, int *b, int *c, int n) { //Asserting that the results calculated are correct for(int i=0; i<n; i++) { assert(c[i] == a[i] + b[i]); } } int main() { //Performing operations for 65536 numbers int n = 1<<16; //Pointers for CPU vectors int *h_a, *h_b, *h_c; //Pointers for GPU vectors int *d_a, *d_b, *d_c; //Calculate memory needed for each vector size_t bytes = n*sizeof(int); //Allocate calculated memory on CPU or host h_a = (int *) malloc(bytes); h_b = (int *) malloc(bytes); h_c = (int *) malloc(bytes); //Allocate memory on GPU hipMalloc(&d_a, bytes); hipMalloc(&d_b, bytes); hipMalloc(&d_c, bytes); //Initializing arrays with random numbers for (int i=0; i<n; i++) { h_a[i] = rand(); h_b[i] = rand(); } //Copying arrays from CPU to GPU hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice); hipMemcpy(d_b, h_b, bytes, hipMemcpyHostToDevice); //No. of threads per block int num_threads = 1024; //No. of Thread Blocks int num_blocks = (int) ceil(float(n) / num_threads); //Starting time to calculate time taken on GPU clock_t start = clock(); //Launch kernel on GPU vector_add<<<num_blocks, num_threads>>>(d_a, d_b, d_c, n); //Recording end time clock_t end = clock(); //Copying results from GPU to CPU hipMemcpy(h_c, d_c, bytes, hipMemcpyDeviceToHost); //Verifying results verify_results(h_a, h_b, h_c, n); //Free CUDA Memory hipFree(d_a); hipFree(d_b); hipFree(d_c); //Free CPU Memory free(h_a); free(h_b); free(h_c); double time_taken = double(end - start) / CLOCKS_PER_SEC; std::cout << "Time Taken on GPU: " << time_taken << std::endl; std::cout << "Completed Successfully" << std::endl; return 0; }
.text .file "vector_add.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .globl _Z25__device_stub__vector_addPiS_S_i # -- Begin function _Z25__device_stub__vector_addPiS_S_i .p2align 4, 0x90 .type _Z25__device_stub__vector_addPiS_S_i,@function _Z25__device_stub__vector_addPiS_S_i: # @_Z25__device_stub__vector_addPiS_S_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z10vector_addPiS_S_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z25__device_stub__vector_addPiS_S_i, .Lfunc_end0-_Z25__device_stub__vector_addPiS_S_i .cfi_endproc # -- End function .globl _Z14verify_resultsPiS_S_i # -- Begin function _Z14verify_resultsPiS_S_i .p2align 4, 0x90 .type _Z14verify_resultsPiS_S_i,@function _Z14verify_resultsPiS_S_i: # @_Z14verify_resultsPiS_S_i .cfi_startproc # %bb.0: retq .Lfunc_end1: .size _Z14verify_resultsPiS_S_i, .Lfunc_end1-_Z14verify_resultsPiS_S_i .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function main .LCPI2_0: .quad 0x412e848000000000 # double 1.0E+6 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r13 .cfi_def_cfa_offset 32 pushq %r12 .cfi_def_cfa_offset 40 pushq %rbx .cfi_def_cfa_offset 48 subq $144, %rsp .cfi_def_cfa_offset 192 
.cfi_offset %rbx, -48 .cfi_offset %r12, -40 .cfi_offset %r13, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movl $262144, %edi # imm = 0x40000 callq malloc movq %rax, %rbx movl $262144, %edi # imm = 0x40000 callq malloc movq %rax, %r14 movl $262144, %edi # imm = 0x40000 callq malloc movq %rax, %r15 leaq 16(%rsp), %rdi movl $262144, %esi # imm = 0x40000 callq hipMalloc leaq 8(%rsp), %rdi movl $262144, %esi # imm = 0x40000 callq hipMalloc movq %rsp, %rdi movl $262144, %esi # imm = 0x40000 callq hipMalloc xorl %r12d, %r12d .p2align 4, 0x90 .LBB2_1: # =>This Inner Loop Header: Depth=1 callq rand movl %eax, (%rbx,%r12,4) callq rand movl %eax, (%r14,%r12,4) incq %r12 cmpq $65536, %r12 # imm = 0x10000 jne .LBB2_1 # %bb.2: movq 16(%rsp), %rdi movl $262144, %edx # imm = 0x40000 movq %rbx, %rsi movl $1, %ecx callq hipMemcpy movq 8(%rsp), %rdi movl $262144, %edx # imm = 0x40000 movq %r14, %rsi movl $1, %ecx callq hipMemcpy callq clock movq %rax, %r12 movabsq $4294967360, %rdi # imm = 0x100000040 leaq 960(%rdi), %rdx movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB2_4 # %bb.3: movq 16(%rsp), %rax movq 8(%rsp), %rcx movq (%rsp), %rdx movq %rax, 104(%rsp) movq %rcx, 96(%rsp) movq %rdx, 88(%rsp) movl $65536, 28(%rsp) # imm = 0x10000 leaq 104(%rsp), %rax movq %rax, 112(%rsp) leaq 96(%rsp), %rax movq %rax, 120(%rsp) leaq 88(%rsp), %rax movq %rax, 128(%rsp) leaq 28(%rsp), %rax movq %rax, 136(%rsp) leaq 72(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 72(%rsp), %rsi movl 80(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d leaq 112(%rsp), %r9 movl $_Z10vector_addPiS_S_i, %edi pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB2_4: callq clock movq %rax, %r13 movq (%rsp), %rsi movl $262144, %edx # imm = 0x40000 movq %r15, %rdi movl $2, %ecx callq 
hipMemcpy movq 16(%rsp), %rdi callq hipFree movq 8(%rsp), %rdi callq hipFree movq (%rsp), %rdi callq hipFree movq %rbx, %rdi callq free movq %r14, %rdi callq free movq %r15, %rdi callq free subq %r12, %r13 cvtsi2sd %r13, %xmm0 divsd .LCPI2_0(%rip), %xmm0 movsd %xmm0, 32(%rsp) # 8-byte Spill movl $_ZSt4cout, %edi movl $.L.str, %esi movl $19, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl $_ZSt4cout, %edi movsd 32(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero callq _ZNSo9_M_insertIdEERSoT_ movq (%rax), %rcx movq -24(%rcx), %rcx movq 240(%rax,%rcx), %rbx testq %rbx, %rbx je .LBB2_13 # %bb.5: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i cmpb $0, 56(%rbx) je .LBB2_7 # %bb.6: movzbl 67(%rbx), %ecx jmp .LBB2_8 .LBB2_7: movq %rbx, %rdi movq %rax, %r14 callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%rbx), %rax movq %rbx, %rdi movl $10, %esi callq *48(%rax) movl %eax, %ecx movq %r14, %rax .LBB2_8: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit movsbl %cl, %esi movq %rax, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movl $_ZSt4cout, %edi movl $.L.str.1, %esi movl $22, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq _ZSt4cout+240(%rax), %rbx testq %rbx, %rbx je .LBB2_13 # %bb.9: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i37 cmpb $0, 56(%rbx) je .LBB2_11 # %bb.10: movzbl 67(%rbx), %eax jmp .LBB2_12 .LBB2_11: movq %rbx, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%rbx), %rax movq %rbx, %rdi movl $10, %esi callq *48(%rax) .LBB2_12: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit40 movsbl %al, %esi movl $_ZSt4cout, %edi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv xorl %eax, %eax addq $144, %rsp .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 
.cfi_def_cfa_offset 8 retq .LBB2_13: .cfi_def_cfa_offset 192 callq _ZSt16__throw_bad_castv .Lfunc_end2: .size main, .Lfunc_end2-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB3_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB3_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z10vector_addPiS_S_i, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end3: .size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB4_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB4_2: retq .Lfunc_end4: .size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor .cfi_endproc # -- End function .type _Z10vector_addPiS_S_i,@object # @_Z10vector_addPiS_S_i .section .rodata,"a",@progbits .globl _Z10vector_addPiS_S_i .p2align 3, 0x0 _Z10vector_addPiS_S_i: .quad _Z25__device_stub__vector_addPiS_S_i .size _Z10vector_addPiS_S_i, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Time Taken on GPU: " .size .L.str, 20 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "Completed Successfully" .size .L.str.1, 23 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z10vector_addPiS_S_i" .size .L__unnamed_1, 22 .type 
__hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z25__device_stub__vector_addPiS_S_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z10vector_addPiS_S_i .addrsig_sym _ZSt4cout .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z10vector_addPiS_S_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */ /* 0x000e280000002500 */ /*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */ /* 0x001fca00078e0203 */ /*0040*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x178], PT ; /* 0x00005e0006007a0c */ /* 0x000fda0003f06270 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */ /* 0x000fe200000001ff */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd20000000a00 */ /*0080*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */ /* 0x000fc800078e0207 */ /*0090*/ IMAD.WIDE R2, R6.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580006027625 */ /* 0x0c0fe400078e0207 */ /*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea8000c1e1900 */ /*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea2000c1e1900 */ /*00c0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */ /* 0x000fe200078e0207 */ /*00d0*/ IADD3 R9, R4, R3, RZ ; /* 0x0000000304097210 */ /* 0x004fca0007ffe0ff */ /*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x000fe2000c101904 */ /*00f0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0100*/ BRA 0x100; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ 
/*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z10vector_addPiS_S_i .globl _Z10vector_addPiS_S_i .p2align 8 .type _Z10vector_addPiS_S_i,@function _Z10vector_addPiS_S_i: s_clause 0x1 s_load_b32 s2, s[0:1], 0x2c s_load_b32 s3, s[0:1], 0x18 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_i32_e64 s3, v1 s_cbranch_execz .LBB0_2 s_load_b128 s[4:7], s[0:1], 0x0 v_ashrrev_i32_e32 v2, 31, v1 s_load_b64 s[0:1], s[0:1], 0x10 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[1:2] s_waitcnt lgkmcnt(0) v_add_co_u32 v2, vcc_lo, s4, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo v_add_co_u32 v4, vcc_lo, s6, v0 v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s0, v0 global_load_b32 v2, v[2:3], off global_load_b32 v3, v[4:5], off v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo s_waitcnt vmcnt(0) v_add_nc_u32_e32 v2, v3, v2 global_store_b32 v[0:1], v2, off .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z10vector_addPiS_S_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 
.amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z10vector_addPiS_S_i, .Lfunc_end0-_Z10vector_addPiS_S_i .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 
.size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z10vector_addPiS_S_i .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z10vector_addPiS_S_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00002588_00000000-6_vector_add.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB3673: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3673: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z14verify_resultsPiS_S_i .type _Z14verify_resultsPiS_S_i, @function _Z14verify_resultsPiS_S_i: .LFB3669: .cfi_startproc endbr64 testl %ecx, %ecx jle .L3 movl $0, %eax .L5: addl $1, %eax cmpl %eax, %ecx jne .L5 .L3: ret .cfi_endproc .LFE3669: .size _Z14verify_resultsPiS_S_i, .-_Z14verify_resultsPiS_S_i .globl _Z35__device_stub__Z10vector_addPiS_S_iPiS_S_i .type _Z35__device_stub__Z10vector_addPiS_S_iPiS_S_i, @function _Z35__device_stub__Z10vector_addPiS_S_iPiS_S_i: .LFB3695: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L11 .L7: movq 136(%rsp), %rax subq %fs:40, %rax jne .L12 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L11: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z10vector_addPiS_S_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 
jmp .L7 .L12: call __stack_chk_fail@PLT .cfi_endproc .LFE3695: .size _Z35__device_stub__Z10vector_addPiS_S_iPiS_S_i, .-_Z35__device_stub__Z10vector_addPiS_S_iPiS_S_i .globl _Z10vector_addPiS_S_i .type _Z10vector_addPiS_S_i, @function _Z10vector_addPiS_S_i: .LFB3696: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z35__device_stub__Z10vector_addPiS_S_iPiS_S_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3696: .size _Z10vector_addPiS_S_i, .-_Z10vector_addPiS_S_i .section .rodata.str1.1,"aMS",@progbits,1 .LC1: .string "Time Taken on GPU: " .LC2: .string "Completed Successfully" .text .globl main .type main, @function main: .LFB3670: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 subq $64, %rsp .cfi_def_cfa_offset 112 movq %fs:40, %rax movq %rax, 56(%rsp) xorl %eax, %eax movl $262144, %edi call malloc@PLT movq %rax, %r12 movl $262144, %edi call malloc@PLT movq %rax, %rbp movl $262144, %edi call malloc@PLT movq %rax, %r13 leaq 8(%rsp), %rdi movl $262144, %esi call cudaMalloc@PLT leaq 16(%rsp), %rdi movl $262144, %esi call cudaMalloc@PLT leaq 24(%rsp), %rdi movl $262144, %esi call cudaMalloc@PLT movl $0, %ebx .L16: call rand@PLT movl %eax, (%r12,%rbx) call rand@PLT movl %eax, 0(%rbp,%rbx) addq $4, %rbx cmpq $262144, %rbx jne .L16 movl $1, %ecx movl $262144, %edx movq %r12, %rsi movq 8(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movl $262144, %edx movq %rbp, %rsi movq 16(%rsp), %rdi call cudaMemcpy@PLT call clock@PLT movq %rax, %r14 movl $1024, 44(%rsp) movl $1, 48(%rsp) movl $64, 32(%rsp) movl $1, 36(%rsp) movl $0, %r9d movl $0, %r8d movq 44(%rsp), %rdx movl $1, %ecx movq 32(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L21 .L17: call clock@PLT movq %rax, %rbx 
movl $2, %ecx movl $262144, %edx movq 24(%rsp), %rsi movq %r13, %rdi call cudaMemcpy@PLT movq 8(%rsp), %rdi call cudaFree@PLT movq 16(%rsp), %rdi call cudaFree@PLT movq 24(%rsp), %rdi call cudaFree@PLT movq %r12, %rdi call free@PLT movq %rbp, %rdi call free@PLT movq %r13, %rdi call free@PLT subq %r14, %rbx pxor %xmm0, %xmm0 cvtsi2sdq %rbx, %xmm0 divsd .LC0(%rip), %xmm0 movq %xmm0, %rbx leaq .LC1(%rip), %rsi leaq _ZSt4cout(%rip), %rbp movq %rbp, %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi movq %rbx, %xmm0 call _ZNSo9_M_insertIdEERSoT_@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT leaq .LC2(%rip), %rsi movq %rbp, %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT movq 56(%rsp), %rax subq %fs:40, %rax jne .L22 movl $0, %eax addq $64, %rsp .cfi_remember_state .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .L21: .cfi_restore_state movl $65536, %ecx movq 24(%rsp), %rdx movq 16(%rsp), %rsi movq 8(%rsp), %rdi call _Z35__device_stub__Z10vector_addPiS_S_iPiS_S_i jmp .L17 .L22: call __stack_chk_fail@PLT .cfi_endproc .LFE3670: .size main, .-main .section .rodata.str1.1 .LC3: .string "_Z10vector_addPiS_S_i" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB3698: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC3(%rip), %rdx movq %rdx, %rcx leaq _Z10vector_addPiS_S_i(%rip), %rsi call __cudaRegisterFunction@PLT addq 
$32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3698: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC0: .long 0 .long 1093567616 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "vector_add.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .globl _Z25__device_stub__vector_addPiS_S_i # -- Begin function _Z25__device_stub__vector_addPiS_S_i .p2align 4, 0x90 .type _Z25__device_stub__vector_addPiS_S_i,@function _Z25__device_stub__vector_addPiS_S_i: # @_Z25__device_stub__vector_addPiS_S_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z10vector_addPiS_S_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z25__device_stub__vector_addPiS_S_i, .Lfunc_end0-_Z25__device_stub__vector_addPiS_S_i .cfi_endproc # -- End function .globl _Z14verify_resultsPiS_S_i # -- Begin function _Z14verify_resultsPiS_S_i .p2align 4, 0x90 .type _Z14verify_resultsPiS_S_i,@function _Z14verify_resultsPiS_S_i: # @_Z14verify_resultsPiS_S_i .cfi_startproc # %bb.0: retq .Lfunc_end1: .size _Z14verify_resultsPiS_S_i, .Lfunc_end1-_Z14verify_resultsPiS_S_i .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function main .LCPI2_0: .quad 0x412e848000000000 # double 1.0E+6 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r13 .cfi_def_cfa_offset 32 pushq %r12 .cfi_def_cfa_offset 40 pushq %rbx .cfi_def_cfa_offset 48 subq $144, %rsp .cfi_def_cfa_offset 192 
.cfi_offset %rbx, -48 .cfi_offset %r12, -40 .cfi_offset %r13, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movl $262144, %edi # imm = 0x40000 callq malloc movq %rax, %rbx movl $262144, %edi # imm = 0x40000 callq malloc movq %rax, %r14 movl $262144, %edi # imm = 0x40000 callq malloc movq %rax, %r15 leaq 16(%rsp), %rdi movl $262144, %esi # imm = 0x40000 callq hipMalloc leaq 8(%rsp), %rdi movl $262144, %esi # imm = 0x40000 callq hipMalloc movq %rsp, %rdi movl $262144, %esi # imm = 0x40000 callq hipMalloc xorl %r12d, %r12d .p2align 4, 0x90 .LBB2_1: # =>This Inner Loop Header: Depth=1 callq rand movl %eax, (%rbx,%r12,4) callq rand movl %eax, (%r14,%r12,4) incq %r12 cmpq $65536, %r12 # imm = 0x10000 jne .LBB2_1 # %bb.2: movq 16(%rsp), %rdi movl $262144, %edx # imm = 0x40000 movq %rbx, %rsi movl $1, %ecx callq hipMemcpy movq 8(%rsp), %rdi movl $262144, %edx # imm = 0x40000 movq %r14, %rsi movl $1, %ecx callq hipMemcpy callq clock movq %rax, %r12 movabsq $4294967360, %rdi # imm = 0x100000040 leaq 960(%rdi), %rdx movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB2_4 # %bb.3: movq 16(%rsp), %rax movq 8(%rsp), %rcx movq (%rsp), %rdx movq %rax, 104(%rsp) movq %rcx, 96(%rsp) movq %rdx, 88(%rsp) movl $65536, 28(%rsp) # imm = 0x10000 leaq 104(%rsp), %rax movq %rax, 112(%rsp) leaq 96(%rsp), %rax movq %rax, 120(%rsp) leaq 88(%rsp), %rax movq %rax, 128(%rsp) leaq 28(%rsp), %rax movq %rax, 136(%rsp) leaq 72(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 72(%rsp), %rsi movl 80(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d leaq 112(%rsp), %r9 movl $_Z10vector_addPiS_S_i, %edi pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB2_4: callq clock movq %rax, %r13 movq (%rsp), %rsi movl $262144, %edx # imm = 0x40000 movq %r15, %rdi movl $2, %ecx callq 
hipMemcpy movq 16(%rsp), %rdi callq hipFree movq 8(%rsp), %rdi callq hipFree movq (%rsp), %rdi callq hipFree movq %rbx, %rdi callq free movq %r14, %rdi callq free movq %r15, %rdi callq free subq %r12, %r13 cvtsi2sd %r13, %xmm0 divsd .LCPI2_0(%rip), %xmm0 movsd %xmm0, 32(%rsp) # 8-byte Spill movl $_ZSt4cout, %edi movl $.L.str, %esi movl $19, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl $_ZSt4cout, %edi movsd 32(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero callq _ZNSo9_M_insertIdEERSoT_ movq (%rax), %rcx movq -24(%rcx), %rcx movq 240(%rax,%rcx), %rbx testq %rbx, %rbx je .LBB2_13 # %bb.5: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i cmpb $0, 56(%rbx) je .LBB2_7 # %bb.6: movzbl 67(%rbx), %ecx jmp .LBB2_8 .LBB2_7: movq %rbx, %rdi movq %rax, %r14 callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%rbx), %rax movq %rbx, %rdi movl $10, %esi callq *48(%rax) movl %eax, %ecx movq %r14, %rax .LBB2_8: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit movsbl %cl, %esi movq %rax, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movl $_ZSt4cout, %edi movl $.L.str.1, %esi movl $22, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq _ZSt4cout+240(%rax), %rbx testq %rbx, %rbx je .LBB2_13 # %bb.9: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i37 cmpb $0, 56(%rbx) je .LBB2_11 # %bb.10: movzbl 67(%rbx), %eax jmp .LBB2_12 .LBB2_11: movq %rbx, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%rbx), %rax movq %rbx, %rdi movl $10, %esi callq *48(%rax) .LBB2_12: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit40 movsbl %al, %esi movl $_ZSt4cout, %edi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv xorl %eax, %eax addq $144, %rsp .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 
.cfi_def_cfa_offset 8 retq .LBB2_13: .cfi_def_cfa_offset 192 callq _ZSt16__throw_bad_castv .Lfunc_end2: .size main, .Lfunc_end2-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB3_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB3_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z10vector_addPiS_S_i, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end3: .size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB4_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB4_2: retq .Lfunc_end4: .size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor .cfi_endproc # -- End function .type _Z10vector_addPiS_S_i,@object # @_Z10vector_addPiS_S_i .section .rodata,"a",@progbits .globl _Z10vector_addPiS_S_i .p2align 3, 0x0 _Z10vector_addPiS_S_i: .quad _Z25__device_stub__vector_addPiS_S_i .size _Z10vector_addPiS_S_i, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Time Taken on GPU: " .size .L.str, 20 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "Completed Successfully" .size .L.str.1, 23 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z10vector_addPiS_S_i" .size .L__unnamed_1, 22 .type 
__hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z25__device_stub__vector_addPiS_S_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z10vector_addPiS_S_i .addrsig_sym _ZSt4cout .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <pthread.h> #include <stdio.h> #include <string.h> #include <math.h> #ifndef D #define D 10000 #endif #ifndef N_FILES #define N_FILES 21000 #endif #define ARG_COUNT 4 #define MAX_FILE_NAME 100 #define GMEM_GRANULARITY 128 #define INV_DICT_WIDTH ((unsigned int)(ceil(N_FILES / (float)(sizeof(int)*8)))) #define INV_DICT_WIDTH_PAD ((INV_DICT_WIDTH/GMEM_GRANULARITY+1)*GMEM_GRANULARITY) #define INV_DICT_SIZE (D*INV_DICT_WIDTH_PAD) #define BLOCK_SIZE 64 #define N_BLOCKS ((unsigned int)(ceil(N_FILES/ (float)(sizeof(int)*8*BLOCK_SIZE)))) #define N_THREADS (BLOCK_SIZE*N_BLOCKS) unsigned int *inv_dictionary; char files[N_FILES][MAX_FILE_NAME]; char query[D]; unsigned int query_ones[D]; unsigned int matches[N_FILES]; // inv_dict_width = width of inv dictionary in number of elements // int_size = size of an integer in bytes __global__ void queryKernel(unsigned int * __restrict__ inv_dictionary, unsigned int inv_dict_width, unsigned int inv_dict_width_pad, unsigned int * __restrict__ query_ones, unsigned int ones_cnt, unsigned int * matches, unsigned int int_size, unsigned int n_threads) { unsigned short curr_bit = 0; unsigned int match_pos; unsigned int match_cnt = 0; // unsigned int match_idx; __shared__ unsigned int match_idx_s[BLOCK_SIZE]; unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < inv_dict_width){ //inv_dict_width = 657 match_idx_s[threadIdx.x] = 0xFFFFFFFF; for(int i = 0; i < ones_cnt; i++) { match_idx_s[threadIdx.x] = match_idx_s[threadIdx.x] & *(inv_dictionary + query_ones[i]*inv_dict_width_pad + index); } match_cnt = 0; for(int j=1; j<=(int_size*8) && match_idx_s[threadIdx.x]>0; j++) { curr_bit = match_idx_s[threadIdx.x] & 1; match_idx_s[threadIdx.x] = match_idx_s[threadIdx.x] >> 1; if(curr_bit==1) { match_pos = (index + 1)*int_size*8 - j + 1; // starts indexing at 1 not 0 matches[index*int_size*8+match_cnt] = match_pos; match_cnt++; } } } } __host__ void load_inv_dictionary(char file[]){ unsigned int num_read; FILE *fp = NULL; fp = 
fopen(file, "r"); if (fp == NULL){ printf("Error while opening %s\n", file); } int i; for(int j = 0; j<D; j++) { for(i=0; i<INV_DICT_WIDTH-1; i++){ // printf("%d:\t%d:\t", j, i); num_read = fscanf(fp, "%u ", inv_dictionary + j*INV_DICT_WIDTH_PAD + i); // printf("%u\n", *(inv_dictionary + j*INV_DICT_WIDTH_PAD + i)); if (num_read != 1) printf("ERROR!\t fscanf did not fill all arguments\n"); } // printf("%d:\t%d:\t", j, i); num_read = fscanf(fp, "%u\n", inv_dictionary + j*INV_DICT_WIDTH_PAD + i); // printf("%u\n", *(inv_dictionary + j*INV_DICT_WIDTH_PAD + i)); if (num_read != 1) printf("ERROR!\t fscanf did not fill all arguments\n"); } fclose(fp); } __host__ void load_query(char file[]){ unsigned int num_read; FILE *fp = NULL; fp = fopen(file, "r"); if (fp == NULL){ printf("Error while opening %s\n", file); } for (int i=0; i<D; i++) { num_read = fscanf(fp, "%c", &query[i]); if (num_read != 1) printf("ERROR!\t fscanf did not fill all arguments\n"); } // printf("Loaded query: "); // for (int i=0; i<D; i++) { // printf("%c", query[i]); // } // printf("\n"); fclose(fp); } __host__ void printMatches(){ for (int i=0; i<N_FILES; i++) { printf("Match %d:\t%u\n", i, matches[i]); } } __host__ void load_files(char file[]) { FILE *fp = NULL; fp = fopen(file, "r"); int num_read = 0; if (fp == NULL){ printf("Error while opening %s\n", file); } for(int i = 0; i<N_FILES; i++) { num_read = fscanf(fp, "%s", files[i]); if (num_read != 1) printf("Error reading file: %s\n", file); // printf("%d: %s\n", i, files[i]); } fclose(fp); } unsigned int findQueryOnes(char* query){ unsigned int ones_cnt = 0; for(unsigned int i=0; i<D; i++) { if(query[i] == '1') { query_ones[ones_cnt] = i; printf("One position:\t%d\n", i); ones_cnt++; } } return ones_cnt; } __host__ void reportQuery(char* report_f){ FILE *rep = fopen(report_f, "w+"); for (int i=0; i < N_FILES; i++){ if (matches[i] != 0) { fprintf(rep, "%s\n", files[matches[i]-1]); } } fclose(rep); } int main(int argc, char** argv){ // Command-line 
arguments: // D, itemmem input file, dictionary input file, query if(argc != (ARG_COUNT+1)){ printf("Requires arguments: <files file> <dictionary input file> <query input file> <output directory>\n"); return 1; } char dict_file[MAX_FILE_NAME]; char files_file[MAX_FILE_NAME]; char query_file[MAX_FILE_NAME]; char output_dir[MAX_FILE_NAME]; char report_file[MAX_FILE_NAME]; sprintf(files_file, "%s", argv[1]); sprintf(dict_file, "%s", argv[2]); sprintf(query_file, "%s", argv[3]); sprintf(output_dir, "%s", argv[4]); printf("INV_DICT_SIZE:\t%u\nINV_DICT_WIDTH:\t%u\nINV_DICT_WIDTH_PAD:\t%u\nN_BLOCKS:\t%u\nBLOCK_SIZE:\t%d\n", INV_DICT_SIZE, INV_DICT_WIDTH, INV_DICT_WIDTH_PAD, N_BLOCKS, BLOCK_SIZE); sprintf(report_file, "%s/cu_query_report.txt", output_dir); unsigned int *d_inv_dict, *d_matches; unsigned int *d_query_ones; unsigned int ones_cnt; inv_dictionary = (unsigned int *)malloc(INV_DICT_SIZE*sizeof(unsigned int)); load_inv_dictionary(dict_file); load_query(query_file); ones_cnt = findQueryOnes(query); #ifdef PROFILING for (int i = 0; i < 100; ++i) { #endif // const int num_streams = 8; // cudaStream_t streams[num_streams]; cudaMalloc((void **)&d_inv_dict, INV_DICT_SIZE*sizeof(unsigned int)); cudaMalloc((void **)&d_query_ones, ones_cnt*sizeof(unsigned int)); cudaMalloc((void **)&d_matches, N_FILES*sizeof(unsigned int)); cudaMemset(d_matches, 0, N_FILES*sizeof(unsigned int)); // printf("Allocated arrays...\n"); cudaMemcpy(d_inv_dict, inv_dictionary, INV_DICT_SIZE*sizeof(unsigned int), cudaMemcpyHostToDevice); cudaError_t err = cudaMemcpy(d_query_ones, query_ones, ones_cnt*sizeof(unsigned int), cudaMemcpyHostToDevice); printf("Err: %d\n", err); // printf("Query...\n"); // for(int i=0; i < num_streams; i++) { // cudaStreamCreate(&streams[i]); // } // for(int i=0; i < num_streams; i++) { queryKernel<<<N_BLOCKS, BLOCK_SIZE>>>((unsigned int *)d_inv_dict, INV_DICT_WIDTH, INV_DICT_WIDTH_PAD, d_query_ones, ones_cnt, d_matches, sizeof(int), N_THREADS); cudaMemcpy(matches, 
d_matches, N_FILES*sizeof(unsigned int), cudaMemcpyDeviceToHost); // } // printMatches(); load_files(files_file); reportQuery(report_file); cudaFree(d_inv_dict); cudaFree(d_query_ones); cudaFree(d_matches); #ifdef PROFILING } #endif free(inv_dictionary); printf("Ended! :)\n"); return 0; }
.file "tmpxft_000672ac_00000000-6_cu-backup.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2076: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2076: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "r" .LC1: .string "Error while opening %s\n" .LC2: .string "%u " .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC3: .string "ERROR!\t fscanf did not fill all arguments\n" .section .rodata.str1.1 .LC4: .string "%u\n" .text .globl _Z19load_inv_dictionaryPc .type _Z19load_inv_dictionaryPc, @function _Z19load_inv_dictionaryPc: .LFB2067: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $8, %rsp .cfi_def_cfa_offset 64 movq %rdi, %rbx leaq .LC0(%rip), %rsi call fopen@PLT movq %rax, %r12 testq %rax, %rax je .L12 .L4: movl $2624, %ebp leaq .LC2(%rip), %r13 leaq .LC3(%rip), %r14 leaq .LC4(%rip), %r15 jmp .L5 .L12: movq %rbx, %rdx leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L4 .L6: addq $4, %rbx cmpq %rbp, %rbx je .L13 .L7: movq %rbx, %rdx addq inv_dictionary(%rip), %rdx movq %r13, %rsi movq %r12, %rdi movl $0, %eax call __isoc23_fscanf@PLT cmpl $1, %eax je .L6 movq %r14, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L6 .L13: movq %rbp, %rdx addq inv_dictionary(%rip), %rdx movq %r15, %rsi movq %r12, %rdi movl $0, %eax call __isoc23_fscanf@PLT cmpl $1, %eax jne .L14 .L8: addq $3072, %rbp cmpq $30722624, %rbp je .L9 .L5: leaq -2624(%rbp), 
%rbx jmp .L7 .L14: movq %r14, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L8 .L9: movq %r12, %rdi call fclose@PLT addq $8, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2067: .size _Z19load_inv_dictionaryPc, .-_Z19load_inv_dictionaryPc .section .rodata.str1.1 .LC5: .string "%c" .text .globl _Z10load_queryPc .type _Z10load_queryPc, @function _Z10load_queryPc: .LFB2068: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 movq %rdi, %rbx leaq .LC0(%rip), %rsi call fopen@PLT movq %rax, %rbp testq %rax, %rax je .L21 .L16: leaq query(%rip), %rbx leaq 10000(%rbx), %r13 leaq .LC5(%rip), %r12 leaq .LC3(%rip), %r14 jmp .L18 .L21: movq %rbx, %rdx leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L16 .L17: addq $1, %rbx cmpq %r13, %rbx je .L22 .L18: movq %rbx, %rdx movq %r12, %rsi movq %rbp, %rdi movl $0, %eax call __isoc23_fscanf@PLT cmpl $1, %eax je .L17 movq %r14, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L17 .L22: movq %rbp, %rdi call fclose@PLT popq %rbx .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2068: .size _Z10load_queryPc, .-_Z10load_queryPc .section .rodata.str1.1 .LC6: .string "Match %d:\t%u\n" .text .globl _Z12printMatchesv .type _Z12printMatchesv, @function _Z12printMatchesv: .LFB2069: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 
32 .cfi_offset 3, -32 movl $0, %ebx leaq matches(%rip), %r12 leaq .LC6(%rip), %rbp .L24: movl (%r12,%rbx,4), %ecx movl %ebx, %edx movq %rbp, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addq $1, %rbx cmpq $21000, %rbx jne .L24 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2069: .size _Z12printMatchesv, .-_Z12printMatchesv .section .rodata.str1.1 .LC7: .string "%s" .LC8: .string "Error reading file: %s\n" .text .globl _Z10load_filesPc .type _Z10load_filesPc, @function _Z10load_filesPc: .LFB2070: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $8, %rsp .cfi_def_cfa_offset 64 movq %rdi, %r14 leaq .LC0(%rip), %rsi call fopen@PLT movq %rax, %rbp testq %rax, %rax je .L33 .L28: leaq files(%rip), %rbx leaq 2100000(%rbx), %r13 leaq .LC7(%rip), %r12 leaq .LC8(%rip), %r15 jmp .L30 .L33: movq %r14, %rdx leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L28 .L29: addq $100, %rbx cmpq %r13, %rbx je .L34 .L30: movq %rbx, %rdx movq %r12, %rsi movq %rbp, %rdi movl $0, %eax call __isoc23_fscanf@PLT cmpl $1, %eax je .L29 movq %r14, %rdx movq %r15, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L29 .L34: movq %rbp, %rdi call fclose@PLT addq $8, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2070: .size _Z10load_filesPc, .-_Z10load_filesPc .section .rodata.str1.1 .LC9: .string "One position:\t%d\n" .text .globl _Z13findQueryOnesPc .type _Z13findQueryOnesPc, @function 
_Z13findQueryOnesPc: .LFB2071: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 movq %rdi, %rbp movl $0, %ebx movl $0, %r12d leaq query_ones(%rip), %r14 leaq .LC9(%rip), %r13 jmp .L37 .L36: addq $1, %rbx cmpq $10000, %rbx je .L40 .L37: cmpb $49, 0(%rbp,%rbx) jne .L36 movl %ebx, %edx movl %r12d, %eax movl %ebx, (%r14,%rax,4) movq %r13, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addl $1, %r12d jmp .L36 .L40: movl %r12d, %eax popq %rbx .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2071: .size _Z13findQueryOnesPc, .-_Z13findQueryOnesPc .section .rodata.str1.1 .LC10: .string "w+" .LC11: .string "%s\n" .text .globl _Z11reportQueryPc .type _Z11reportQueryPc, @function _Z11reportQueryPc: .LFB2072: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 leaq .LC10(%rip), %rsi call fopen@PLT movq %rax, %r12 leaq matches(%rip), %rbx leaq 84000(%rbx), %rbp leaq files(%rip), %r14 leaq .LC11(%rip), %r13 jmp .L43 .L42: addq $4, %rbx cmpq %rbp, %rbx je .L46 .L43: movl (%rbx), %eax testl %eax, %eax je .L42 leal -1(%rax), %eax leaq (%rax,%rax,4), %rax leaq (%rax,%rax,4), %rax leaq (%r14,%rax,4), %rcx movq %r13, %rdx movl $2, %esi movq %r12, %rdi movl $0, %eax call __fprintf_chk@PLT jmp .L42 .L46: movq %r12, %rdi call fclose@PLT popq %rbx .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 
.cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2072: .size _Z11reportQueryPc, .-_Z11reportQueryPc .globl _Z40__device_stub__Z11queryKernelPjjjS_jS_jjPjjjS_jS_jj .type _Z40__device_stub__Z11queryKernelPjjjS_jS_jjPjjjS_jS_jj, @function _Z40__device_stub__Z11queryKernelPjjjS_jS_jjPjjjS_jS_jj: .LFB2098: .cfi_startproc endbr64 subq $200, %rsp .cfi_def_cfa_offset 208 movl %esi, 28(%rsp) movl %edx, 24(%rsp) movl %r8d, 20(%rsp) movq %r9, 8(%rsp) movq %fs:40, %rax movq %rax, 184(%rsp) xorl %eax, %eax movq %rdi, 32(%rsp) leaq 32(%rsp), %rax movq %rax, 112(%rsp) leaq 28(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) movq %rcx, 40(%rsp) leaq 40(%rsp), %rax movq %rax, 136(%rsp) leaq 20(%rsp), %rax movq %rax, 144(%rsp) leaq 8(%rsp), %rax movq %rax, 152(%rsp) leaq 208(%rsp), %rax movq %rax, 160(%rsp) leaq 216(%rsp), %rax movq %rax, 168(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L51 .L47: movq 184(%rsp), %rax subq %fs:40, %rax jne .L52 addq $200, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L51: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 216 pushq 56(%rsp) .cfi_def_cfa_offset 224 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq _Z11queryKernelPjjjS_jS_jj(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 208 jmp .L47 .L52: call __stack_chk_fail@PLT .cfi_endproc .LFE2098: .size _Z40__device_stub__Z11queryKernelPjjjS_jS_jjPjjjS_jS_jj, .-_Z40__device_stub__Z11queryKernelPjjjS_jS_jjPjjjS_jS_jj .globl _Z11queryKernelPjjjS_jS_jj .type _Z11queryKernelPjjjS_jS_jj, @function _Z11queryKernelPjjjS_jS_jj: .LFB2099: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movl 24(%rsp), %eax pushq %rax .cfi_def_cfa_offset 24 movl 24(%rsp), %eax pushq %rax 
.cfi_def_cfa_offset 32 call _Z40__device_stub__Z11queryKernelPjjjS_jS_jjPjjjS_jS_jj addq $24, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2099: .size _Z11queryKernelPjjjS_jS_jj, .-_Z11queryKernelPjjjS_jS_jj .section .rodata.str1.8 .align 8 .LC12: .string "Requires arguments: <files file> <dictionary input file> <query input file> <output directory>\n" .align 8 .LC13: .string "INV_DICT_SIZE:\t%u\nINV_DICT_WIDTH:\t%u\nINV_DICT_WIDTH_PAD:\t%u\nN_BLOCKS:\t%u\nBLOCK_SIZE:\t%d\n" .section .rodata.str1.1 .LC14: .string "%s/cu_query_report.txt" .LC15: .string "Err: %d\n" .LC16: .string "Ended! :)\n" .text .globl main .type main, @function main: .LFB2073: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 subq $608, %rsp .cfi_def_cfa_offset 656 movq %fs:40, %rax movq %rax, 600(%rsp) xorl %eax, %eax cmpl $5, %edi je .L56 leaq .LC12(%rip), %rsi movl $2, %edi call __printf_chk@PLT movl $1, %eax .L55: movq 600(%rsp), %rdx subq %fs:40, %rdx jne .L61 addq $608, %rsp .cfi_remember_state .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .L56: .cfi_restore_state movq %rsi, %rbx leaq 160(%rsp), %rdi movq 8(%rsi), %r8 leaq .LC7(%rip), %rbp movq %rbp, %rcx movl $100, %edx movl $2, %esi movl $0, %eax call __sprintf_chk@PLT leaq 48(%rsp), %r13 movq 16(%rbx), %r8 movq %rbp, %rcx movl $100, %edx movl $2, %esi movq %r13, %rdi movl $0, %eax call __sprintf_chk@PLT leaq 272(%rsp), %r12 movq 24(%rbx), %r8 movq %rbp, %rcx movl $100, %edx movl $2, %esi movq %r12, %rdi movl $0, %eax call __sprintf_chk@PLT leaq 384(%rsp), %r14 movq 32(%rbx), %r8 movq %rbp, %rcx movl $100, %edx movl $2, %esi movq %r14, %rdi movl $0, %eax 
call __sprintf_chk@PLT subq $8, %rsp .cfi_def_cfa_offset 664 pushq $64 .cfi_def_cfa_offset 672 movl $11, %r9d movl $768, %r8d movl $657, %ecx movl $7680000, %edx leaq .LC13(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq 512(%rsp), %rdi movq %r14, %r8 leaq .LC14(%rip), %rcx movl $100, %edx movl $2, %esi movl $0, %eax call __sprintf_chk@PLT movl $30720000, %edi call malloc@PLT movq %rax, inv_dictionary(%rip) addq $16, %rsp .cfi_def_cfa_offset 656 movq %r13, %rdi call _Z19load_inv_dictionaryPc movq %r12, %rdi call _Z10load_queryPc leaq query(%rip), %rdi call _Z13findQueryOnesPc movl %eax, %ebp movq %rsp, %rdi movl $30720000, %esi call cudaMalloc@PLT movl %ebp, %ebx salq $2, %rbx leaq 16(%rsp), %rdi movq %rbx, %rsi call cudaMalloc@PLT leaq 8(%rsp), %rdi movl $84000, %esi call cudaMalloc@PLT movl $84000, %edx movl $0, %esi movq 8(%rsp), %rdi call cudaMemset@PLT movl $1, %ecx movl $30720000, %edx movq inv_dictionary(%rip), %rsi movq (%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movq %rbx, %rdx leaq query_ones(%rip), %rsi movq 16(%rsp), %rdi call cudaMemcpy@PLT movl %eax, %edx leaq .LC15(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $64, 36(%rsp) movl $1, 40(%rsp) movl $11, 24(%rsp) movl $1, 28(%rsp) movl $0, %r9d movl $0, %r8d movq 36(%rsp), %rdx movl $1, %ecx movq 24(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L62 .L58: movl $2, %ecx movl $84000, %edx movq 8(%rsp), %rsi leaq matches(%rip), %rdi call cudaMemcpy@PLT leaq 160(%rsp), %rdi call _Z10load_filesPc leaq 496(%rsp), %rdi call _Z11reportQueryPc movq (%rsp), %rdi call cudaFree@PLT movq 16(%rsp), %rdi call cudaFree@PLT movq 8(%rsp), %rdi call cudaFree@PLT movq inv_dictionary(%rip), %rdi call free@PLT leaq .LC16(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $0, %eax jmp .L55 .L62: pushq $704 .cfi_def_cfa_offset 664 pushq $4 .cfi_def_cfa_offset 672 movq 24(%rsp), %r9 movl %ebp, %r8d movq 32(%rsp), %rcx movl $768, %edx 
movl $657, %esi movq 16(%rsp), %rdi call _Z40__device_stub__Z11queryKernelPjjjS_jS_jjPjjjS_jS_jj addq $16, %rsp .cfi_def_cfa_offset 656 jmp .L58 .L61: call __stack_chk_fail@PLT .cfi_endproc .LFE2073: .size main, .-main .section .rodata.str1.1 .LC17: .string "_Z11queryKernelPjjjS_jS_jj" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2101: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC17(%rip), %rdx movq %rdx, %rcx leaq _Z11queryKernelPjjjS_jS_jj(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2101: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .globl matches .bss .align 32 .type matches, @object .size matches, 84000 matches: .zero 84000 .globl query_ones .align 32 .type query_ones, @object .size query_ones, 40000 query_ones: .zero 40000 .globl query .align 32 .type query, @object .size query, 10000 query: .zero 10000 .globl files .align 32 .type files, @object .size files, 2100000 files: .zero 2100000 .globl inv_dictionary .align 8 .type inv_dictionary, @object .size inv_dictionary, 8 inv_dictionary: .zero 8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 
13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <pthread.h> #include <stdio.h> #include <string.h> #include <math.h> #ifndef D #define D 10000 #endif #ifndef N_FILES #define N_FILES 21000 #endif #define ARG_COUNT 4 #define MAX_FILE_NAME 100 #define GMEM_GRANULARITY 128 #define INV_DICT_WIDTH ((unsigned int)(ceil(N_FILES / (float)(sizeof(int)*8)))) #define INV_DICT_WIDTH_PAD ((INV_DICT_WIDTH/GMEM_GRANULARITY+1)*GMEM_GRANULARITY) #define INV_DICT_SIZE (D*INV_DICT_WIDTH_PAD) #define BLOCK_SIZE 64 #define N_BLOCKS ((unsigned int)(ceil(N_FILES/ (float)(sizeof(int)*8*BLOCK_SIZE)))) #define N_THREADS (BLOCK_SIZE*N_BLOCKS) unsigned int *inv_dictionary; char files[N_FILES][MAX_FILE_NAME]; char query[D]; unsigned int query_ones[D]; unsigned int matches[N_FILES]; // inv_dict_width = width of inv dictionary in number of elements // int_size = size of an integer in bytes __global__ void queryKernel(unsigned int * __restrict__ inv_dictionary, unsigned int inv_dict_width, unsigned int inv_dict_width_pad, unsigned int * __restrict__ query_ones, unsigned int ones_cnt, unsigned int * matches, unsigned int int_size, unsigned int n_threads) { unsigned short curr_bit = 0; unsigned int match_pos; unsigned int match_cnt = 0; // unsigned int match_idx; __shared__ unsigned int match_idx_s[BLOCK_SIZE]; unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < inv_dict_width){ //inv_dict_width = 657 match_idx_s[threadIdx.x] = 0xFFFFFFFF; for(int i = 0; i < ones_cnt; i++) { match_idx_s[threadIdx.x] = match_idx_s[threadIdx.x] & *(inv_dictionary + query_ones[i]*inv_dict_width_pad + index); } match_cnt = 0; for(int j=1; j<=(int_size*8) && match_idx_s[threadIdx.x]>0; j++) { curr_bit = match_idx_s[threadIdx.x] & 1; match_idx_s[threadIdx.x] = match_idx_s[threadIdx.x] >> 1; if(curr_bit==1) { match_pos = (index + 1)*int_size*8 - j + 1; // starts indexing at 1 not 0 matches[index*int_size*8+match_cnt] = match_pos; match_cnt++; } } } } __host__ void load_inv_dictionary(char file[]){ unsigned int num_read; FILE *fp = NULL; fp = 
fopen(file, "r"); if (fp == NULL){ printf("Error while opening %s\n", file); } int i; for(int j = 0; j<D; j++) { for(i=0; i<INV_DICT_WIDTH-1; i++){ // printf("%d:\t%d:\t", j, i); num_read = fscanf(fp, "%u ", inv_dictionary + j*INV_DICT_WIDTH_PAD + i); // printf("%u\n", *(inv_dictionary + j*INV_DICT_WIDTH_PAD + i)); if (num_read != 1) printf("ERROR!\t fscanf did not fill all arguments\n"); } // printf("%d:\t%d:\t", j, i); num_read = fscanf(fp, "%u\n", inv_dictionary + j*INV_DICT_WIDTH_PAD + i); // printf("%u\n", *(inv_dictionary + j*INV_DICT_WIDTH_PAD + i)); if (num_read != 1) printf("ERROR!\t fscanf did not fill all arguments\n"); } fclose(fp); } __host__ void load_query(char file[]){ unsigned int num_read; FILE *fp = NULL; fp = fopen(file, "r"); if (fp == NULL){ printf("Error while opening %s\n", file); } for (int i=0; i<D; i++) { num_read = fscanf(fp, "%c", &query[i]); if (num_read != 1) printf("ERROR!\t fscanf did not fill all arguments\n"); } // printf("Loaded query: "); // for (int i=0; i<D; i++) { // printf("%c", query[i]); // } // printf("\n"); fclose(fp); } __host__ void printMatches(){ for (int i=0; i<N_FILES; i++) { printf("Match %d:\t%u\n", i, matches[i]); } } __host__ void load_files(char file[]) { FILE *fp = NULL; fp = fopen(file, "r"); int num_read = 0; if (fp == NULL){ printf("Error while opening %s\n", file); } for(int i = 0; i<N_FILES; i++) { num_read = fscanf(fp, "%s", files[i]); if (num_read != 1) printf("Error reading file: %s\n", file); // printf("%d: %s\n", i, files[i]); } fclose(fp); } unsigned int findQueryOnes(char* query){ unsigned int ones_cnt = 0; for(unsigned int i=0; i<D; i++) { if(query[i] == '1') { query_ones[ones_cnt] = i; printf("One position:\t%d\n", i); ones_cnt++; } } return ones_cnt; } __host__ void reportQuery(char* report_f){ FILE *rep = fopen(report_f, "w+"); for (int i=0; i < N_FILES; i++){ if (matches[i] != 0) { fprintf(rep, "%s\n", files[matches[i]-1]); } } fclose(rep); } int main(int argc, char** argv){ // Command-line 
arguments: // D, itemmem input file, dictionary input file, query if(argc != (ARG_COUNT+1)){ printf("Requires arguments: <files file> <dictionary input file> <query input file> <output directory>\n"); return 1; } char dict_file[MAX_FILE_NAME]; char files_file[MAX_FILE_NAME]; char query_file[MAX_FILE_NAME]; char output_dir[MAX_FILE_NAME]; char report_file[MAX_FILE_NAME]; sprintf(files_file, "%s", argv[1]); sprintf(dict_file, "%s", argv[2]); sprintf(query_file, "%s", argv[3]); sprintf(output_dir, "%s", argv[4]); printf("INV_DICT_SIZE:\t%u\nINV_DICT_WIDTH:\t%u\nINV_DICT_WIDTH_PAD:\t%u\nN_BLOCKS:\t%u\nBLOCK_SIZE:\t%d\n", INV_DICT_SIZE, INV_DICT_WIDTH, INV_DICT_WIDTH_PAD, N_BLOCKS, BLOCK_SIZE); sprintf(report_file, "%s/cu_query_report.txt", output_dir); unsigned int *d_inv_dict, *d_matches; unsigned int *d_query_ones; unsigned int ones_cnt; inv_dictionary = (unsigned int *)malloc(INV_DICT_SIZE*sizeof(unsigned int)); load_inv_dictionary(dict_file); load_query(query_file); ones_cnt = findQueryOnes(query); #ifdef PROFILING for (int i = 0; i < 100; ++i) { #endif // const int num_streams = 8; // cudaStream_t streams[num_streams]; cudaMalloc((void **)&d_inv_dict, INV_DICT_SIZE*sizeof(unsigned int)); cudaMalloc((void **)&d_query_ones, ones_cnt*sizeof(unsigned int)); cudaMalloc((void **)&d_matches, N_FILES*sizeof(unsigned int)); cudaMemset(d_matches, 0, N_FILES*sizeof(unsigned int)); // printf("Allocated arrays...\n"); cudaMemcpy(d_inv_dict, inv_dictionary, INV_DICT_SIZE*sizeof(unsigned int), cudaMemcpyHostToDevice); cudaError_t err = cudaMemcpy(d_query_ones, query_ones, ones_cnt*sizeof(unsigned int), cudaMemcpyHostToDevice); printf("Err: %d\n", err); // printf("Query...\n"); // for(int i=0; i < num_streams; i++) { // cudaStreamCreate(&streams[i]); // } // for(int i=0; i < num_streams; i++) { queryKernel<<<N_BLOCKS, BLOCK_SIZE>>>((unsigned int *)d_inv_dict, INV_DICT_WIDTH, INV_DICT_WIDTH_PAD, d_query_ones, ones_cnt, d_matches, sizeof(int), N_THREADS); cudaMemcpy(matches, 
d_matches, N_FILES*sizeof(unsigned int), cudaMemcpyDeviceToHost); // } // printMatches(); load_files(files_file); reportQuery(report_file); cudaFree(d_inv_dict); cudaFree(d_query_ones); cudaFree(d_matches); #ifdef PROFILING } #endif free(inv_dictionary); printf("Ended! :)\n"); return 0; }
#include <hip/hip_runtime.h> #include <pthread.h> #include <stdio.h> #include <string.h> #include <math.h> #ifndef D #define D 10000 #endif #ifndef N_FILES #define N_FILES 21000 #endif #define ARG_COUNT 4 #define MAX_FILE_NAME 100 #define GMEM_GRANULARITY 128 #define INV_DICT_WIDTH ((unsigned int)(ceil(N_FILES / (float)(sizeof(int)*8)))) #define INV_DICT_WIDTH_PAD ((INV_DICT_WIDTH/GMEM_GRANULARITY+1)*GMEM_GRANULARITY) #define INV_DICT_SIZE (D*INV_DICT_WIDTH_PAD) #define BLOCK_SIZE 64 #define N_BLOCKS ((unsigned int)(ceil(N_FILES/ (float)(sizeof(int)*8*BLOCK_SIZE)))) #define N_THREADS (BLOCK_SIZE*N_BLOCKS) unsigned int *inv_dictionary; char files[N_FILES][MAX_FILE_NAME]; char query[D]; unsigned int query_ones[D]; unsigned int matches[N_FILES]; // inv_dict_width = width of inv dictionary in number of elements // int_size = size of an integer in bytes __global__ void queryKernel(unsigned int * __restrict__ inv_dictionary, unsigned int inv_dict_width, unsigned int inv_dict_width_pad, unsigned int * __restrict__ query_ones, unsigned int ones_cnt, unsigned int * matches, unsigned int int_size, unsigned int n_threads) { unsigned short curr_bit = 0; unsigned int match_pos; unsigned int match_cnt = 0; // unsigned int match_idx; __shared__ unsigned int match_idx_s[BLOCK_SIZE]; unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < inv_dict_width){ //inv_dict_width = 657 match_idx_s[threadIdx.x] = 0xFFFFFFFF; for(int i = 0; i < ones_cnt; i++) { match_idx_s[threadIdx.x] = match_idx_s[threadIdx.x] & *(inv_dictionary + query_ones[i]*inv_dict_width_pad + index); } match_cnt = 0; for(int j=1; j<=(int_size*8) && match_idx_s[threadIdx.x]>0; j++) { curr_bit = match_idx_s[threadIdx.x] & 1; match_idx_s[threadIdx.x] = match_idx_s[threadIdx.x] >> 1; if(curr_bit==1) { match_pos = (index + 1)*int_size*8 - j + 1; // starts indexing at 1 not 0 matches[index*int_size*8+match_cnt] = match_pos; match_cnt++; } } } } __host__ void load_inv_dictionary(char file[]){ unsigned int 
num_read; FILE *fp = NULL; fp = fopen(file, "r"); if (fp == NULL){ printf("Error while opening %s\n", file); } int i; for(int j = 0; j<D; j++) { for(i=0; i<INV_DICT_WIDTH-1; i++){ // printf("%d:\t%d:\t", j, i); num_read = fscanf(fp, "%u ", inv_dictionary + j*INV_DICT_WIDTH_PAD + i); // printf("%u\n", *(inv_dictionary + j*INV_DICT_WIDTH_PAD + i)); if (num_read != 1) printf("ERROR!\t fscanf did not fill all arguments\n"); } // printf("%d:\t%d:\t", j, i); num_read = fscanf(fp, "%u\n", inv_dictionary + j*INV_DICT_WIDTH_PAD + i); // printf("%u\n", *(inv_dictionary + j*INV_DICT_WIDTH_PAD + i)); if (num_read != 1) printf("ERROR!\t fscanf did not fill all arguments\n"); } fclose(fp); } __host__ void load_query(char file[]){ unsigned int num_read; FILE *fp = NULL; fp = fopen(file, "r"); if (fp == NULL){ printf("Error while opening %s\n", file); } for (int i=0; i<D; i++) { num_read = fscanf(fp, "%c", &query[i]); if (num_read != 1) printf("ERROR!\t fscanf did not fill all arguments\n"); } // printf("Loaded query: "); // for (int i=0; i<D; i++) { // printf("%c", query[i]); // } // printf("\n"); fclose(fp); } __host__ void printMatches(){ for (int i=0; i<N_FILES; i++) { printf("Match %d:\t%u\n", i, matches[i]); } } __host__ void load_files(char file[]) { FILE *fp = NULL; fp = fopen(file, "r"); int num_read = 0; if (fp == NULL){ printf("Error while opening %s\n", file); } for(int i = 0; i<N_FILES; i++) { num_read = fscanf(fp, "%s", files[i]); if (num_read != 1) printf("Error reading file: %s\n", file); // printf("%d: %s\n", i, files[i]); } fclose(fp); } unsigned int findQueryOnes(char* query){ unsigned int ones_cnt = 0; for(unsigned int i=0; i<D; i++) { if(query[i] == '1') { query_ones[ones_cnt] = i; printf("One position:\t%d\n", i); ones_cnt++; } } return ones_cnt; } __host__ void reportQuery(char* report_f){ FILE *rep = fopen(report_f, "w+"); for (int i=0; i < N_FILES; i++){ if (matches[i] != 0) { fprintf(rep, "%s\n", files[matches[i]-1]); } } fclose(rep); } int main(int argc, 
char** argv){ // Command-line arguments: // D, itemmem input file, dictionary input file, query if(argc != (ARG_COUNT+1)){ printf("Requires arguments: <files file> <dictionary input file> <query input file> <output directory>\n"); return 1; } char dict_file[MAX_FILE_NAME]; char files_file[MAX_FILE_NAME]; char query_file[MAX_FILE_NAME]; char output_dir[MAX_FILE_NAME]; char report_file[MAX_FILE_NAME]; sprintf(files_file, "%s", argv[1]); sprintf(dict_file, "%s", argv[2]); sprintf(query_file, "%s", argv[3]); sprintf(output_dir, "%s", argv[4]); printf("INV_DICT_SIZE:\t%u\nINV_DICT_WIDTH:\t%u\nINV_DICT_WIDTH_PAD:\t%u\nN_BLOCKS:\t%u\nBLOCK_SIZE:\t%d\n", INV_DICT_SIZE, INV_DICT_WIDTH, INV_DICT_WIDTH_PAD, N_BLOCKS, BLOCK_SIZE); sprintf(report_file, "%s/cu_query_report.txt", output_dir); unsigned int *d_inv_dict, *d_matches; unsigned int *d_query_ones; unsigned int ones_cnt; inv_dictionary = (unsigned int *)malloc(INV_DICT_SIZE*sizeof(unsigned int)); load_inv_dictionary(dict_file); load_query(query_file); ones_cnt = findQueryOnes(query); #ifdef PROFILING for (int i = 0; i < 100; ++i) { #endif // const int num_streams = 8; // cudaStream_t streams[num_streams]; hipMalloc((void **)&d_inv_dict, INV_DICT_SIZE*sizeof(unsigned int)); hipMalloc((void **)&d_query_ones, ones_cnt*sizeof(unsigned int)); hipMalloc((void **)&d_matches, N_FILES*sizeof(unsigned int)); hipMemset(d_matches, 0, N_FILES*sizeof(unsigned int)); // printf("Allocated arrays...\n"); hipMemcpy(d_inv_dict, inv_dictionary, INV_DICT_SIZE*sizeof(unsigned int), hipMemcpyHostToDevice); hipError_t err = hipMemcpy(d_query_ones, query_ones, ones_cnt*sizeof(unsigned int), hipMemcpyHostToDevice); printf("Err: %d\n", err); // printf("Query...\n"); // for(int i=0; i < num_streams; i++) { // cudaStreamCreate(&streams[i]); // } // for(int i=0; i < num_streams; i++) { queryKernel<<<N_BLOCKS, BLOCK_SIZE>>>((unsigned int *)d_inv_dict, INV_DICT_WIDTH, INV_DICT_WIDTH_PAD, d_query_ones, ones_cnt, d_matches, sizeof(int), N_THREADS); 
hipMemcpy(matches, d_matches, N_FILES*sizeof(unsigned int), hipMemcpyDeviceToHost); // } // printMatches(); load_files(files_file); reportQuery(report_file); hipFree(d_inv_dict); hipFree(d_query_ones); hipFree(d_matches); #ifdef PROFILING } #endif free(inv_dictionary); printf("Ended! :)\n"); return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include <pthread.h> #include <stdio.h> #include <string.h> #include <math.h> #ifndef D #define D 10000 #endif #ifndef N_FILES #define N_FILES 21000 #endif #define ARG_COUNT 4 #define MAX_FILE_NAME 100 #define GMEM_GRANULARITY 128 #define INV_DICT_WIDTH ((unsigned int)(ceil(N_FILES / (float)(sizeof(int)*8)))) #define INV_DICT_WIDTH_PAD ((INV_DICT_WIDTH/GMEM_GRANULARITY+1)*GMEM_GRANULARITY) #define INV_DICT_SIZE (D*INV_DICT_WIDTH_PAD) #define BLOCK_SIZE 64 #define N_BLOCKS ((unsigned int)(ceil(N_FILES/ (float)(sizeof(int)*8*BLOCK_SIZE)))) #define N_THREADS (BLOCK_SIZE*N_BLOCKS) unsigned int *inv_dictionary; char files[N_FILES][MAX_FILE_NAME]; char query[D]; unsigned int query_ones[D]; unsigned int matches[N_FILES]; // inv_dict_width = width of inv dictionary in number of elements // int_size = size of an integer in bytes __global__ void queryKernel(unsigned int * __restrict__ inv_dictionary, unsigned int inv_dict_width, unsigned int inv_dict_width_pad, unsigned int * __restrict__ query_ones, unsigned int ones_cnt, unsigned int * matches, unsigned int int_size, unsigned int n_threads) { unsigned short curr_bit = 0; unsigned int match_pos; unsigned int match_cnt = 0; // unsigned int match_idx; __shared__ unsigned int match_idx_s[BLOCK_SIZE]; unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < inv_dict_width){ //inv_dict_width = 657 match_idx_s[threadIdx.x] = 0xFFFFFFFF; for(int i = 0; i < ones_cnt; i++) { match_idx_s[threadIdx.x] = match_idx_s[threadIdx.x] & *(inv_dictionary + query_ones[i]*inv_dict_width_pad + index); } match_cnt = 0; for(int j=1; j<=(int_size*8) && match_idx_s[threadIdx.x]>0; j++) { curr_bit = match_idx_s[threadIdx.x] & 1; match_idx_s[threadIdx.x] = match_idx_s[threadIdx.x] >> 1; if(curr_bit==1) { match_pos = (index + 1)*int_size*8 - j + 1; // starts indexing at 1 not 0 matches[index*int_size*8+match_cnt] = match_pos; match_cnt++; } } } } __host__ void load_inv_dictionary(char file[]){ unsigned int 
num_read; FILE *fp = NULL; fp = fopen(file, "r"); if (fp == NULL){ printf("Error while opening %s\n", file); } int i; for(int j = 0; j<D; j++) { for(i=0; i<INV_DICT_WIDTH-1; i++){ // printf("%d:\t%d:\t", j, i); num_read = fscanf(fp, "%u ", inv_dictionary + j*INV_DICT_WIDTH_PAD + i); // printf("%u\n", *(inv_dictionary + j*INV_DICT_WIDTH_PAD + i)); if (num_read != 1) printf("ERROR!\t fscanf did not fill all arguments\n"); } // printf("%d:\t%d:\t", j, i); num_read = fscanf(fp, "%u\n", inv_dictionary + j*INV_DICT_WIDTH_PAD + i); // printf("%u\n", *(inv_dictionary + j*INV_DICT_WIDTH_PAD + i)); if (num_read != 1) printf("ERROR!\t fscanf did not fill all arguments\n"); } fclose(fp); } __host__ void load_query(char file[]){ unsigned int num_read; FILE *fp = NULL; fp = fopen(file, "r"); if (fp == NULL){ printf("Error while opening %s\n", file); } for (int i=0; i<D; i++) { num_read = fscanf(fp, "%c", &query[i]); if (num_read != 1) printf("ERROR!\t fscanf did not fill all arguments\n"); } // printf("Loaded query: "); // for (int i=0; i<D; i++) { // printf("%c", query[i]); // } // printf("\n"); fclose(fp); } __host__ void printMatches(){ for (int i=0; i<N_FILES; i++) { printf("Match %d:\t%u\n", i, matches[i]); } } __host__ void load_files(char file[]) { FILE *fp = NULL; fp = fopen(file, "r"); int num_read = 0; if (fp == NULL){ printf("Error while opening %s\n", file); } for(int i = 0; i<N_FILES; i++) { num_read = fscanf(fp, "%s", files[i]); if (num_read != 1) printf("Error reading file: %s\n", file); // printf("%d: %s\n", i, files[i]); } fclose(fp); } unsigned int findQueryOnes(char* query){ unsigned int ones_cnt = 0; for(unsigned int i=0; i<D; i++) { if(query[i] == '1') { query_ones[ones_cnt] = i; printf("One position:\t%d\n", i); ones_cnt++; } } return ones_cnt; } __host__ void reportQuery(char* report_f){ FILE *rep = fopen(report_f, "w+"); for (int i=0; i < N_FILES; i++){ if (matches[i] != 0) { fprintf(rep, "%s\n", files[matches[i]-1]); } } fclose(rep); } int main(int argc, 
char** argv){ // Command-line arguments: // D, itemmem input file, dictionary input file, query if(argc != (ARG_COUNT+1)){ printf("Requires arguments: <files file> <dictionary input file> <query input file> <output directory>\n"); return 1; } char dict_file[MAX_FILE_NAME]; char files_file[MAX_FILE_NAME]; char query_file[MAX_FILE_NAME]; char output_dir[MAX_FILE_NAME]; char report_file[MAX_FILE_NAME]; sprintf(files_file, "%s", argv[1]); sprintf(dict_file, "%s", argv[2]); sprintf(query_file, "%s", argv[3]); sprintf(output_dir, "%s", argv[4]); printf("INV_DICT_SIZE:\t%u\nINV_DICT_WIDTH:\t%u\nINV_DICT_WIDTH_PAD:\t%u\nN_BLOCKS:\t%u\nBLOCK_SIZE:\t%d\n", INV_DICT_SIZE, INV_DICT_WIDTH, INV_DICT_WIDTH_PAD, N_BLOCKS, BLOCK_SIZE); sprintf(report_file, "%s/cu_query_report.txt", output_dir); unsigned int *d_inv_dict, *d_matches; unsigned int *d_query_ones; unsigned int ones_cnt; inv_dictionary = (unsigned int *)malloc(INV_DICT_SIZE*sizeof(unsigned int)); load_inv_dictionary(dict_file); load_query(query_file); ones_cnt = findQueryOnes(query); #ifdef PROFILING for (int i = 0; i < 100; ++i) { #endif // const int num_streams = 8; // cudaStream_t streams[num_streams]; hipMalloc((void **)&d_inv_dict, INV_DICT_SIZE*sizeof(unsigned int)); hipMalloc((void **)&d_query_ones, ones_cnt*sizeof(unsigned int)); hipMalloc((void **)&d_matches, N_FILES*sizeof(unsigned int)); hipMemset(d_matches, 0, N_FILES*sizeof(unsigned int)); // printf("Allocated arrays...\n"); hipMemcpy(d_inv_dict, inv_dictionary, INV_DICT_SIZE*sizeof(unsigned int), hipMemcpyHostToDevice); hipError_t err = hipMemcpy(d_query_ones, query_ones, ones_cnt*sizeof(unsigned int), hipMemcpyHostToDevice); printf("Err: %d\n", err); // printf("Query...\n"); // for(int i=0; i < num_streams; i++) { // cudaStreamCreate(&streams[i]); // } // for(int i=0; i < num_streams; i++) { queryKernel<<<N_BLOCKS, BLOCK_SIZE>>>((unsigned int *)d_inv_dict, INV_DICT_WIDTH, INV_DICT_WIDTH_PAD, d_query_ones, ones_cnt, d_matches, sizeof(int), N_THREADS); 
hipMemcpy(matches, d_matches, N_FILES*sizeof(unsigned int), hipMemcpyDeviceToHost); // } // printMatches(); load_files(files_file); reportQuery(report_file); hipFree(d_inv_dict); hipFree(d_query_ones); hipFree(d_matches); #ifdef PROFILING } #endif free(inv_dictionary); printf("Ended! :)\n"); return 0; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z11queryKernelPjjjS_jS_jj .globl _Z11queryKernelPjjjS_jS_jj .p2align 8 .type _Z11queryKernelPjjjS_jS_jj,@function _Z11queryKernelPjjjS_jS_jj: s_clause 0x1 s_load_b32 s2, s[0:1], 0x3c s_load_b32 s3, s[0:1], 0x8 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_u32_e64 s3, v1 s_cbranch_execz .LBB0_12 s_load_b32 s6, s[0:1], 0x18 v_dual_mov_b32 v0, -1 :: v_dual_lshlrev_b32 v3, 2, v0 s_mov_b32 s3, 0 ds_store_b32 v3, v0 s_waitcnt lgkmcnt(0) s_cmp_eq_u32 s6, 0 s_cbranch_scc1 .LBB0_5 s_clause 0x2 s_load_b64 s[8:9], s[0:1], 0x0 s_load_b64 s[4:5], s[0:1], 0x10 s_load_b32 s7, s[0:1], 0xc ds_load_b32 v0, v3 v_mov_b32_e32 v2, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[4:5], 2, v[1:2] s_waitcnt lgkmcnt(0) v_add_co_u32 v2, vcc_lo, s8, v4 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v4, vcc_lo, s9, v5, vcc_lo .LBB0_3: s_load_b32 s2, s[4:5], 0x0 s_add_i32 s6, s6, -1 s_waitcnt lgkmcnt(0) s_mul_i32 s2, s2, s7 s_delay_alu instid0(SALU_CYCLE_1) s_lshl_b64 s[8:9], s[2:3], 2 s_add_u32 s4, s4, 4 v_add_co_u32 v5, vcc_lo, v2, s8 v_add_co_ci_u32_e32 v6, vcc_lo, s9, v4, vcc_lo s_addc_u32 s5, s5, 0 s_cmp_eq_u32 s6, 0 global_load_b32 v5, v[5:6], off s_waitcnt vmcnt(0) v_and_b32_e32 v0, v5, v0 s_cbranch_scc0 .LBB0_3 ds_store_b32 v3, v0 .LBB0_5: s_load_b32 s3, s[0:1], 0x28 s_waitcnt lgkmcnt(0) s_lshl_b32 s2, s3, 3 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u32 s2, 0 s_cbranch_scc1 .LBB0_12 s_load_b64 s[0:1], s[0:1], 0x20 ds_load_b32 v0, v3 s_lshl_b32 s3, s3, 3 v_mov_b32_e32 v5, 0 v_mul_lo_u32 v2, s3, v1 s_delay_alu instid0(VALU_DEP_1) v_dual_mov_b32 v1, 0 :: v_dual_add_nc_u32 v4, s3, v2 s_mov_b32 s3, 0 s_set_inst_prefetch_distance 0x1 s_branch .LBB0_9 .p2align 6 .LBB0_7: s_or_b32 exec_lo, exec_lo, s6 s_add_i32 s2, s2, -1 v_add_nc_u32_e32 
v4, -1, v4 s_cmp_eq_u32 s2, 0 s_cselect_b32 s6, -1, 0 s_and_not1_b32 s4, s4, exec_lo s_and_b32 s6, s6, exec_lo s_delay_alu instid0(SALU_CYCLE_1) s_or_b32 s4, s4, s6 .LBB0_8: s_or_b32 exec_lo, exec_lo, s5 v_mov_b32_e32 v0, v6 s_and_b32 s5, exec_lo, s4 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_or_b32 s3, s5, s3 s_and_not1_b32 exec_lo, exec_lo, s3 s_cbranch_execz .LBB0_12 .LBB0_9: s_or_b32 s4, s4, exec_lo s_mov_b32 s5, exec_lo s_waitcnt lgkmcnt(0) v_cmpx_ne_u32_e32 0, v0 s_cbranch_execz .LBB0_8 v_and_b32_e32 v7, 1, v0 v_lshrrev_b32_e32 v6, 1, v0 s_mov_b32 s6, exec_lo ds_store_b32 v3, v6 v_cmpx_eq_u32_e32 1, v7 s_cbranch_execz .LBB0_7 v_add_nc_u32_e32 v0, v5, v2 v_add_nc_u32_e32 v5, 1, v5 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[7:8], 2, v[0:1] v_add_co_u32 v7, vcc_lo, s0, v7 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v8, vcc_lo, s1, v8, vcc_lo global_store_b32 v[7:8], v4, off s_branch .LBB0_7 .LBB0_12: s_set_inst_prefetch_distance 0x2 s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z11queryKernelPjjjS_jS_jj .amdhsa_group_segment_fixed_size 256 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 304 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 9 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 
.amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z11queryKernelPjjjS_jS_jj, .Lfunc_end0-_Z11queryKernelPjjjS_jS_jj .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .actual_access: read_only .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .offset: 12 .size: 4 .value_kind: by_value - .actual_access: read_only .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .address_space: global .offset: 32 .size: 8 .value_kind: global_buffer - .offset: 40 .size: 4 .value_kind: by_value - .offset: 44 .size: 4 .value_kind: by_value - .offset: 48 .size: 4 .value_kind: hidden_block_count_x - .offset: 52 .size: 4 .value_kind: hidden_block_count_y - .offset: 56 .size: 4 .value_kind: hidden_block_count_z - .offset: 60 .size: 2 .value_kind: hidden_group_size_x - .offset: 62 .size: 2 .value_kind: hidden_group_size_y - .offset: 64 .size: 2 .value_kind: hidden_group_size_z - .offset: 66 .size: 2 .value_kind: hidden_remainder_x - .offset: 68 .size: 2 .value_kind: hidden_remainder_y - .offset: 70 .size: 2 .value_kind: hidden_remainder_z - .offset: 88 .size: 8 .value_kind: 
hidden_global_offset_x - .offset: 96 .size: 8 .value_kind: hidden_global_offset_y - .offset: 104 .size: 8 .value_kind: hidden_global_offset_z - .offset: 112 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 256 .kernarg_segment_align: 8 .kernarg_segment_size: 304 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z11queryKernelPjjjS_jS_jj .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z11queryKernelPjjjS_jS_jj.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 9 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include <pthread.h> #include <stdio.h> #include <string.h> #include <math.h> #ifndef D #define D 10000 #endif #ifndef N_FILES #define N_FILES 21000 #endif #define ARG_COUNT 4 #define MAX_FILE_NAME 100 #define GMEM_GRANULARITY 128 #define INV_DICT_WIDTH ((unsigned int)(ceil(N_FILES / (float)(sizeof(int)*8)))) #define INV_DICT_WIDTH_PAD ((INV_DICT_WIDTH/GMEM_GRANULARITY+1)*GMEM_GRANULARITY) #define INV_DICT_SIZE (D*INV_DICT_WIDTH_PAD) #define BLOCK_SIZE 64 #define N_BLOCKS ((unsigned int)(ceil(N_FILES/ (float)(sizeof(int)*8*BLOCK_SIZE)))) #define N_THREADS (BLOCK_SIZE*N_BLOCKS) unsigned int *inv_dictionary; char files[N_FILES][MAX_FILE_NAME]; char query[D]; unsigned int query_ones[D]; unsigned int matches[N_FILES]; // inv_dict_width = width of inv dictionary in number of elements // int_size = size of an integer in bytes __global__ void queryKernel(unsigned int * __restrict__ inv_dictionary, unsigned int inv_dict_width, unsigned int inv_dict_width_pad, unsigned int * __restrict__ query_ones, unsigned int ones_cnt, unsigned int * matches, unsigned int int_size, unsigned int n_threads) { unsigned short curr_bit = 0; unsigned int match_pos; unsigned int match_cnt = 0; // unsigned int match_idx; __shared__ unsigned int match_idx_s[BLOCK_SIZE]; unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < inv_dict_width){ //inv_dict_width = 657 match_idx_s[threadIdx.x] = 0xFFFFFFFF; for(int i = 0; i < ones_cnt; i++) { match_idx_s[threadIdx.x] = match_idx_s[threadIdx.x] & *(inv_dictionary + query_ones[i]*inv_dict_width_pad + index); } match_cnt = 0; for(int j=1; j<=(int_size*8) && match_idx_s[threadIdx.x]>0; j++) { curr_bit = match_idx_s[threadIdx.x] & 1; match_idx_s[threadIdx.x] = match_idx_s[threadIdx.x] >> 1; if(curr_bit==1) { match_pos = (index + 1)*int_size*8 - j + 1; // starts indexing at 1 not 0 matches[index*int_size*8+match_cnt] = match_pos; match_cnt++; } } } } __host__ void load_inv_dictionary(char file[]){ unsigned int 
num_read; FILE *fp = NULL; fp = fopen(file, "r"); if (fp == NULL){ printf("Error while opening %s\n", file); } int i; for(int j = 0; j<D; j++) { for(i=0; i<INV_DICT_WIDTH-1; i++){ // printf("%d:\t%d:\t", j, i); num_read = fscanf(fp, "%u ", inv_dictionary + j*INV_DICT_WIDTH_PAD + i); // printf("%u\n", *(inv_dictionary + j*INV_DICT_WIDTH_PAD + i)); if (num_read != 1) printf("ERROR!\t fscanf did not fill all arguments\n"); } // printf("%d:\t%d:\t", j, i); num_read = fscanf(fp, "%u\n", inv_dictionary + j*INV_DICT_WIDTH_PAD + i); // printf("%u\n", *(inv_dictionary + j*INV_DICT_WIDTH_PAD + i)); if (num_read != 1) printf("ERROR!\t fscanf did not fill all arguments\n"); } fclose(fp); } __host__ void load_query(char file[]){ unsigned int num_read; FILE *fp = NULL; fp = fopen(file, "r"); if (fp == NULL){ printf("Error while opening %s\n", file); } for (int i=0; i<D; i++) { num_read = fscanf(fp, "%c", &query[i]); if (num_read != 1) printf("ERROR!\t fscanf did not fill all arguments\n"); } // printf("Loaded query: "); // for (int i=0; i<D; i++) { // printf("%c", query[i]); // } // printf("\n"); fclose(fp); } __host__ void printMatches(){ for (int i=0; i<N_FILES; i++) { printf("Match %d:\t%u\n", i, matches[i]); } } __host__ void load_files(char file[]) { FILE *fp = NULL; fp = fopen(file, "r"); int num_read = 0; if (fp == NULL){ printf("Error while opening %s\n", file); } for(int i = 0; i<N_FILES; i++) { num_read = fscanf(fp, "%s", files[i]); if (num_read != 1) printf("Error reading file: %s\n", file); // printf("%d: %s\n", i, files[i]); } fclose(fp); } unsigned int findQueryOnes(char* query){ unsigned int ones_cnt = 0; for(unsigned int i=0; i<D; i++) { if(query[i] == '1') { query_ones[ones_cnt] = i; printf("One position:\t%d\n", i); ones_cnt++; } } return ones_cnt; } __host__ void reportQuery(char* report_f){ FILE *rep = fopen(report_f, "w+"); for (int i=0; i < N_FILES; i++){ if (matches[i] != 0) { fprintf(rep, "%s\n", files[matches[i]-1]); } } fclose(rep); } int main(int argc, 
char** argv){ // Command-line arguments: // D, itemmem input file, dictionary input file, query if(argc != (ARG_COUNT+1)){ printf("Requires arguments: <files file> <dictionary input file> <query input file> <output directory>\n"); return 1; } char dict_file[MAX_FILE_NAME]; char files_file[MAX_FILE_NAME]; char query_file[MAX_FILE_NAME]; char output_dir[MAX_FILE_NAME]; char report_file[MAX_FILE_NAME]; sprintf(files_file, "%s", argv[1]); sprintf(dict_file, "%s", argv[2]); sprintf(query_file, "%s", argv[3]); sprintf(output_dir, "%s", argv[4]); printf("INV_DICT_SIZE:\t%u\nINV_DICT_WIDTH:\t%u\nINV_DICT_WIDTH_PAD:\t%u\nN_BLOCKS:\t%u\nBLOCK_SIZE:\t%d\n", INV_DICT_SIZE, INV_DICT_WIDTH, INV_DICT_WIDTH_PAD, N_BLOCKS, BLOCK_SIZE); sprintf(report_file, "%s/cu_query_report.txt", output_dir); unsigned int *d_inv_dict, *d_matches; unsigned int *d_query_ones; unsigned int ones_cnt; inv_dictionary = (unsigned int *)malloc(INV_DICT_SIZE*sizeof(unsigned int)); load_inv_dictionary(dict_file); load_query(query_file); ones_cnt = findQueryOnes(query); #ifdef PROFILING for (int i = 0; i < 100; ++i) { #endif // const int num_streams = 8; // cudaStream_t streams[num_streams]; hipMalloc((void **)&d_inv_dict, INV_DICT_SIZE*sizeof(unsigned int)); hipMalloc((void **)&d_query_ones, ones_cnt*sizeof(unsigned int)); hipMalloc((void **)&d_matches, N_FILES*sizeof(unsigned int)); hipMemset(d_matches, 0, N_FILES*sizeof(unsigned int)); // printf("Allocated arrays...\n"); hipMemcpy(d_inv_dict, inv_dictionary, INV_DICT_SIZE*sizeof(unsigned int), hipMemcpyHostToDevice); hipError_t err = hipMemcpy(d_query_ones, query_ones, ones_cnt*sizeof(unsigned int), hipMemcpyHostToDevice); printf("Err: %d\n", err); // printf("Query...\n"); // for(int i=0; i < num_streams; i++) { // cudaStreamCreate(&streams[i]); // } // for(int i=0; i < num_streams; i++) { queryKernel<<<N_BLOCKS, BLOCK_SIZE>>>((unsigned int *)d_inv_dict, INV_DICT_WIDTH, INV_DICT_WIDTH_PAD, d_query_ones, ones_cnt, d_matches, sizeof(int), N_THREADS); 
hipMemcpy(matches, d_matches, N_FILES*sizeof(unsigned int), hipMemcpyDeviceToHost); // } // printMatches(); load_files(files_file); reportQuery(report_file); hipFree(d_inv_dict); hipFree(d_query_ones); hipFree(d_matches); #ifdef PROFILING } #endif free(inv_dictionary); printf("Ended! :)\n"); return 0; }
.text .file "cu-backup.hip" .globl _Z26__device_stub__queryKernelPjjjS_jS_jj # -- Begin function _Z26__device_stub__queryKernelPjjjS_jS_jj .p2align 4, 0x90 .type _Z26__device_stub__queryKernelPjjjS_jS_jj,@function _Z26__device_stub__queryKernelPjjjS_jS_jj: # @_Z26__device_stub__queryKernelPjjjS_jS_jj .cfi_startproc # %bb.0: subq $168, %rsp .cfi_def_cfa_offset 176 movq %rdi, 88(%rsp) movl %esi, 20(%rsp) movl %edx, 16(%rsp) movq %rcx, 80(%rsp) movl %r8d, 12(%rsp) movq %r9, 72(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 20(%rsp), %rax movq %rax, 104(%rsp) leaq 16(%rsp), %rax movq %rax, 112(%rsp) leaq 80(%rsp), %rax movq %rax, 120(%rsp) leaq 12(%rsp), %rax movq %rax, 128(%rsp) leaq 72(%rsp), %rax movq %rax, 136(%rsp) leaq 176(%rsp), %rax movq %rax, 144(%rsp) leaq 184(%rsp), %rax movq %rax, 152(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z11queryKernelPjjjS_jS_jj, %edi pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $184, %rsp .cfi_adjust_cfa_offset -184 retq .Lfunc_end0: .size _Z26__device_stub__queryKernelPjjjS_jS_jj, .Lfunc_end0-_Z26__device_stub__queryKernelPjjjS_jS_jj .cfi_endproc # -- End function .globl _Z19load_inv_dictionaryPc # -- Begin function _Z19load_inv_dictionaryPc .p2align 4, 0x90 .type _Z19load_inv_dictionaryPc,@function _Z19load_inv_dictionaryPc: # @_Z19load_inv_dictionaryPc .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r12 .cfi_def_cfa_offset 32 pushq %rbx .cfi_def_cfa_offset 40 pushq %rax .cfi_def_cfa_offset 48 .cfi_offset %rbx, -40 .cfi_offset %r12, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movq %rdi, %r14 movl $.L.str, %esi callq fopen movq %rax, %rbx testq %rax, %rax jne .LBB1_2 # %bb.1: movl $.L.str.1, %edi movq %r14, %rsi xorl %eax, %eax callq 
printf .LBB1_2: # %.preheader.preheader xorl %r14d, %r14d xorl %r15d, %r15d jmp .LBB1_3 .p2align 4, 0x90 .LBB1_9: # in Loop: Header=BB1_3 Depth=1 incq %r15 addq $3072, %r14 # imm = 0xC00 cmpq $10000, %r15 # imm = 0x2710 je .LBB1_10 .LBB1_3: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB1_4 Depth 2 xorl %r12d, %r12d jmp .LBB1_4 .p2align 4, 0x90 .LBB1_6: # in Loop: Header=BB1_4 Depth=2 addq $4, %r12 cmpq $2624, %r12 # imm = 0xA40 je .LBB1_7 .LBB1_4: # Parent Loop BB1_3 Depth=1 # => This Inner Loop Header: Depth=2 movq inv_dictionary(%rip), %rdx addq %r14, %rdx addq %r12, %rdx movl $.L.str.2, %esi movq %rbx, %rdi xorl %eax, %eax callq __isoc23_fscanf cmpl $1, %eax je .LBB1_6 # %bb.5: # in Loop: Header=BB1_4 Depth=2 movl $.Lstr.2, %edi callq puts@PLT jmp .LBB1_6 .p2align 4, 0x90 .LBB1_7: # in Loop: Header=BB1_3 Depth=1 movq inv_dictionary(%rip), %rax leaq (%r15,%r15,2), %rcx shlq $10, %rcx leaq (%rax,%rcx), %rdx addq $2624, %rdx # imm = 0xA40 movl $.L.str.4, %esi movq %rbx, %rdi xorl %eax, %eax callq __isoc23_fscanf cmpl $1, %eax je .LBB1_9 # %bb.8: # in Loop: Header=BB1_3 Depth=1 movl $.Lstr.2, %edi callq puts@PLT jmp .LBB1_9 .LBB1_10: movq %rbx, %rdi addq $8, %rsp .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 jmp fclose # TAILCALL .Lfunc_end1: .size _Z19load_inv_dictionaryPc, .Lfunc_end1-_Z19load_inv_dictionaryPc .cfi_endproc # -- End function .globl _Z10load_queryPc # -- Begin function _Z10load_queryPc .p2align 4, 0x90 .type _Z10load_queryPc,@function _Z10load_queryPc: # @_Z10load_queryPc .cfi_startproc # %bb.0: pushq %r14 .cfi_def_cfa_offset 16 pushq %rbx .cfi_def_cfa_offset 24 pushq %rax .cfi_def_cfa_offset 32 .cfi_offset %rbx, -24 .cfi_offset %r14, -16 movq %rdi, %r14 movl $.L.str, %esi callq fopen movq %rax, %rbx testq %rax, %rax jne .LBB2_1 # %bb.6: movl $.L.str.1, %edi movq %r14, %rsi xorl %eax, %eax callq printf .LBB2_1: # %.preheader xorl 
%r14d, %r14d jmp .LBB2_2 .p2align 4, 0x90 .LBB2_4: # in Loop: Header=BB2_2 Depth=1 incq %r14 cmpq $10000, %r14 # imm = 0x2710 je .LBB2_5 .LBB2_2: # =>This Inner Loop Header: Depth=1 leaq query(%r14), %rdx movl $.L.str.5, %esi movq %rbx, %rdi xorl %eax, %eax callq __isoc23_fscanf cmpl $1, %eax je .LBB2_4 # %bb.3: # in Loop: Header=BB2_2 Depth=1 movl $.Lstr.2, %edi callq puts@PLT jmp .LBB2_4 .LBB2_5: movq %rbx, %rdi addq $8, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 jmp fclose # TAILCALL .Lfunc_end2: .size _Z10load_queryPc, .Lfunc_end2-_Z10load_queryPc .cfi_endproc # -- End function .globl _Z12printMatchesv # -- Begin function _Z12printMatchesv .p2align 4, 0x90 .type _Z12printMatchesv,@function _Z12printMatchesv: # @_Z12printMatchesv .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset %rbx, -16 xorl %ebx, %ebx .p2align 4, 0x90 .LBB3_1: # =>This Inner Loop Header: Depth=1 movl matches(,%rbx,4), %edx movl $.L.str.6, %edi movl %ebx, %esi xorl %eax, %eax callq printf incq %rbx cmpq $21000, %rbx # imm = 0x5208 jne .LBB3_1 # %bb.2: popq %rbx .cfi_def_cfa_offset 8 retq .Lfunc_end3: .size _Z12printMatchesv, .Lfunc_end3-_Z12printMatchesv .cfi_endproc # -- End function .globl _Z10load_filesPc # -- Begin function _Z10load_filesPc .p2align 4, 0x90 .type _Z10load_filesPc,@function _Z10load_filesPc: # @_Z10load_filesPc .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movq %rdi, %rbx movl $.L.str, %esi callq fopen movq %rax, %r14 testq %rax, %rax jne .LBB4_1 # %bb.6: movl $.L.str.1, %edi movq %rbx, %rsi xorl %eax, %eax callq printf .LBB4_1: # %.preheader xorl %r15d, %r15d jmp .LBB4_2 .p2align 4, 0x90 .LBB4_4: # in Loop: Header=BB4_2 Depth=1 addq $100, %r15 cmpq $2100000, %r15 # imm = 0x200B20 je .LBB4_5 .LBB4_2: # =>This Inner Loop Header: Depth=1 leaq files(%r15), %rdx 
movl $.L.str.7, %esi movq %r14, %rdi xorl %eax, %eax callq __isoc23_fscanf cmpl $1, %eax je .LBB4_4 # %bb.3: # in Loop: Header=BB4_2 Depth=1 movl $.L.str.8, %edi movq %rbx, %rsi xorl %eax, %eax callq printf jmp .LBB4_4 .LBB4_5: movq %r14, %rdi popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 jmp fclose # TAILCALL .Lfunc_end4: .size _Z10load_filesPc, .Lfunc_end4-_Z10load_filesPc .cfi_endproc # -- End function .globl _Z13findQueryOnesPc # -- Begin function _Z13findQueryOnesPc .p2align 4, 0x90 .type _Z13findQueryOnesPc,@function _Z13findQueryOnesPc: # @_Z13findQueryOnesPc .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %rbp, -16 movq %rdi, %rbx xorl %r14d, %r14d xorl %ebp, %ebp jmp .LBB5_1 .p2align 4, 0x90 .LBB5_3: # in Loop: Header=BB5_1 Depth=1 incq %r14 cmpq $10000, %r14 # imm = 0x2710 je .LBB5_4 .LBB5_1: # =>This Inner Loop Header: Depth=1 cmpb $49, (%rbx,%r14) jne .LBB5_3 # %bb.2: # in Loop: Header=BB5_1 Depth=1 movl %ebp, %eax movl %r14d, query_ones(,%rax,4) movl $.L.str.9, %edi movl %r14d, %esi xorl %eax, %eax callq printf incl %ebp jmp .LBB5_3 .LBB5_4: movl %ebp, %eax popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end5: .size _Z13findQueryOnesPc, .Lfunc_end5-_Z13findQueryOnesPc .cfi_endproc # -- End function .globl _Z11reportQueryPc # -- Begin function _Z11reportQueryPc .p2align 4, 0x90 .type _Z11reportQueryPc,@function _Z11reportQueryPc: # @_Z11reportQueryPc .cfi_startproc # %bb.0: pushq %r14 .cfi_def_cfa_offset 16 pushq %rbx .cfi_def_cfa_offset 24 pushq %rax .cfi_def_cfa_offset 32 .cfi_offset %rbx, -24 .cfi_offset %r14, -16 movl $.L.str.10, %esi callq fopen movq %rax, %rbx movq $-84000, %r14 # imm = 0xFFFEB7E0 jmp .LBB6_1 .p2align 4, 0x90 .LBB6_3: # in Loop: Header=BB6_1 Depth=1 addq $4, %r14 je .LBB6_4 .LBB6_1: # 
=>This Inner Loop Header: Depth=1 movl matches+84000(%r14), %eax testl %eax, %eax je .LBB6_3 # %bb.2: # in Loop: Header=BB6_1 Depth=1 decl %eax imulq $100, %rax, %rax leaq files(%rax), %rdx movl $.L.str.11, %esi movq %rbx, %rdi xorl %eax, %eax callq fprintf jmp .LBB6_3 .LBB6_4: movq %rbx, %rdi addq $8, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 jmp fclose # TAILCALL .Lfunc_end6: .size _Z11reportQueryPc, .Lfunc_end6-_Z11reportQueryPc .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %rbx .cfi_def_cfa_offset 40 subq $744, %rsp # imm = 0x2E8 .cfi_def_cfa_offset 784 .cfi_offset %rbx, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 cmpl $5, %edi jne .LBB7_1 # %bb.2: movq 8(%rsi), %rax leaq 192(%rsp), %rdi movq %rsi, %r15 movq %rax, %rsi callq strcpy@PLT movq 16(%r15), %rsi leaq 640(%rsp), %rbx movq %rbx, %rdi callq strcpy@PLT movq 24(%r15), %rsi leaq 416(%rsp), %r14 movq %r14, %rdi callq strcpy@PLT movq 32(%r15), %rsi leaq 528(%rsp), %r15 movq %r15, %rdi callq strcpy@PLT movl $.L.str.13, %edi movl $7680000, %esi # imm = 0x753000 movl $657, %edx # imm = 0x291 movl $768, %ecx # imm = 0x300 movl $11, %r8d movl $64, %r9d xorl %eax, %eax callq printf leaq 304(%rsp), %rdi movl $.L.str.14, %esi movq %r15, %rdx xorl %eax, %eax callq sprintf movl $30720000, %edi # imm = 0x1D4C000 callq malloc movq %rax, inv_dictionary(%rip) movq %rbx, %rdi callq _Z19load_inv_dictionaryPc movl $.L.str, %esi movq %r14, %rdi callq fopen movq %rax, %rbx testq %rax, %rax jne .LBB7_4 # %bb.3: leaq 416(%rsp), %rsi movl $.L.str.1, %edi xorl %eax, %eax callq printf .LBB7_4: # %.preheader30 xorl %r14d, %r14d jmp .LBB7_5 .p2align 4, 0x90 .LBB7_7: # in Loop: Header=BB7_5 Depth=1 incq %r14 cmpq $10000, %r14 # imm = 0x2710 je .LBB7_8 
.LBB7_5: # =>This Inner Loop Header: Depth=1 leaq query(%r14), %rdx movl $.L.str.5, %esi movq %rbx, %rdi xorl %eax, %eax callq __isoc23_fscanf cmpl $1, %eax je .LBB7_7 # %bb.6: # in Loop: Header=BB7_5 Depth=1 movl $.Lstr.2, %edi callq puts@PLT jmp .LBB7_7 .LBB7_1: movl $.Lstr.4, %edi callq puts@PLT movl $1, %eax jmp .LBB7_25 .LBB7_8: # %_Z10load_queryPc.exit movq %rbx, %rdi callq fclose xorl %ebx, %ebx xorl %ebp, %ebp jmp .LBB7_9 .p2align 4, 0x90 .LBB7_11: # in Loop: Header=BB7_9 Depth=1 incq %rbx cmpq $10000, %rbx # imm = 0x2710 je .LBB7_12 .LBB7_9: # =>This Inner Loop Header: Depth=1 cmpb $49, query(%rbx) jne .LBB7_11 # %bb.10: # in Loop: Header=BB7_9 Depth=1 movl %ebp, %eax movl %ebx, query_ones(,%rax,4) movl $.L.str.9, %edi movl %ebx, %esi xorl %eax, %eax callq printf incl %ebp jmp .LBB7_11 .LBB7_12: # %_Z13findQueryOnesPc.exit leaq 24(%rsp), %rdi movl $30720000, %esi # imm = 0x1D4C000 callq hipMalloc movl %ebp, %ebx shlq $2, %rbx leaq 16(%rsp), %rdi movq %rbx, %rsi callq hipMalloc leaq 8(%rsp), %rdi movl $84000, %esi # imm = 0x14820 callq hipMalloc movq 8(%rsp), %rdi movl $84000, %edx # imm = 0x14820 xorl %esi, %esi callq hipMemset movq 24(%rsp), %rdi movq inv_dictionary(%rip), %rsi movl $30720000, %edx # imm = 0x1D4C000 movl $1, %ecx callq hipMemcpy movq 16(%rsp), %rdi movl $query_ones, %esi movq %rbx, %rdx movl $1, %ecx callq hipMemcpy movl $.L.str.15, %edi movl %eax, %esi xorl %eax, %eax callq printf movabsq $4294967307, %rdi # imm = 0x10000000B leaq 53(%rdi), %rdx movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB7_14 # %bb.13: movq 24(%rsp), %rax movq 16(%rsp), %rcx movq 8(%rsp), %rdx movq %rax, 120(%rsp) movl $657, 52(%rsp) # imm = 0x291 movl $768, 48(%rsp) # imm = 0x300 movq %rcx, 112(%rsp) movl %ebp, 44(%rsp) movq %rdx, 104(%rsp) movl $4, 40(%rsp) movl $704, 36(%rsp) # imm = 0x2C0 leaq 120(%rsp), %rax movq %rax, 128(%rsp) leaq 52(%rsp), %rax movq %rax, 136(%rsp) leaq 48(%rsp), %rax 
movq %rax, 144(%rsp) leaq 112(%rsp), %rax movq %rax, 152(%rsp) leaq 44(%rsp), %rax movq %rax, 160(%rsp) leaq 104(%rsp), %rax movq %rax, 168(%rsp) leaq 40(%rsp), %rax movq %rax, 176(%rsp) leaq 36(%rsp), %rax movq %rax, 184(%rsp) leaq 88(%rsp), %rdi leaq 72(%rsp), %rsi leaq 64(%rsp), %rdx leaq 56(%rsp), %rcx callq __hipPopCallConfiguration movq 88(%rsp), %rsi movl 96(%rsp), %edx movq 72(%rsp), %rcx movl 80(%rsp), %r8d leaq 128(%rsp), %r9 movl $_Z11queryKernelPjjjS_jS_jj, %edi pushq 56(%rsp) .cfi_adjust_cfa_offset 8 pushq 72(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB7_14: movq 8(%rsp), %rsi movl $matches, %edi movl $84000, %edx # imm = 0x14820 movl $2, %ecx callq hipMemcpy leaq 192(%rsp), %rdi movl $.L.str, %esi callq fopen movq %rax, %rbx testq %rax, %rax jne .LBB7_16 # %bb.15: leaq 192(%rsp), %rsi movl $.L.str.1, %edi xorl %eax, %eax callq printf .LBB7_16: # %.preheader leaq 192(%rsp), %r14 xorl %r15d, %r15d jmp .LBB7_17 .p2align 4, 0x90 .LBB7_19: # in Loop: Header=BB7_17 Depth=1 addq $100, %r15 cmpq $2100000, %r15 # imm = 0x200B20 je .LBB7_20 .LBB7_17: # =>This Inner Loop Header: Depth=1 leaq files(%r15), %rdx movl $.L.str.7, %esi movq %rbx, %rdi xorl %eax, %eax callq __isoc23_fscanf cmpl $1, %eax je .LBB7_19 # %bb.18: # in Loop: Header=BB7_17 Depth=1 movl $.L.str.8, %edi movq %r14, %rsi xorl %eax, %eax callq printf jmp .LBB7_19 .LBB7_20: # %_Z10load_filesPc.exit movq %rbx, %rdi callq fclose leaq 304(%rsp), %rdi movl $.L.str.10, %esi callq fopen movq %rax, %rbx movq $-84000, %r14 # imm = 0xFFFEB7E0 jmp .LBB7_21 .p2align 4, 0x90 .LBB7_23: # in Loop: Header=BB7_21 Depth=1 addq $4, %r14 je .LBB7_24 .LBB7_21: # =>This Inner Loop Header: Depth=1 movl matches+84000(%r14), %eax testl %eax, %eax je .LBB7_23 # %bb.22: # in Loop: Header=BB7_21 Depth=1 decl %eax imulq $100, %rax, %rax leaq files(%rax), %rdx movl $.L.str.11, %esi movq %rbx, %rdi xorl %eax, %eax callq fprintf jmp .LBB7_23 .LBB7_24: # 
%_Z11reportQueryPc.exit movq %rbx, %rdi callq fclose movq 24(%rsp), %rdi callq hipFree movq 16(%rsp), %rdi callq hipFree movq 8(%rsp), %rdi callq hipFree movq inv_dictionary(%rip), %rdi callq free movl $.Lstr.3, %edi callq puts@PLT xorl %eax, %eax .LBB7_25: addq $744, %rsp # imm = 0x2E8 .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end7: .size main, .Lfunc_end7-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB8_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB8_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z11queryKernelPjjjS_jS_jj, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end8: .size __hip_module_ctor, .Lfunc_end8-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB9_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB9_2: retq .Lfunc_end9: .size __hip_module_dtor, .Lfunc_end9-__hip_module_dtor .cfi_endproc # -- End function .type inv_dictionary,@object # @inv_dictionary .bss .globl inv_dictionary .p2align 3, 0x0 inv_dictionary: .quad 0 .size inv_dictionary, 8 .type files,@object # @files .globl files .p2align 4, 0x0 files: .zero 
2100000 .size files, 2100000 .type query,@object # @query .globl query .p2align 4, 0x0 query: .zero 10000 .size query, 10000 .type query_ones,@object # @query_ones .globl query_ones .p2align 4, 0x0 query_ones: .zero 40000 .size query_ones, 40000 .type matches,@object # @matches .globl matches .p2align 4, 0x0 matches: .zero 84000 .size matches, 84000 .type _Z11queryKernelPjjjS_jS_jj,@object # @_Z11queryKernelPjjjS_jS_jj .section .rodata,"a",@progbits .globl _Z11queryKernelPjjjS_jS_jj .p2align 3, 0x0 _Z11queryKernelPjjjS_jS_jj: .quad _Z26__device_stub__queryKernelPjjjS_jS_jj .size _Z11queryKernelPjjjS_jS_jj, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "r" .size .L.str, 2 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "Error while opening %s\n" .size .L.str.1, 24 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "%u " .size .L.str.2, 4 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz "%u\n" .size .L.str.4, 4 .type .L.str.5,@object # @.str.5 .L.str.5: .asciz "%c" .size .L.str.5, 3 .type .L.str.6,@object # @.str.6 .L.str.6: .asciz "Match %d:\t%u\n" .size .L.str.6, 14 .type .L.str.7,@object # @.str.7 .L.str.7: .asciz "%s" .size .L.str.7, 3 .type .L.str.8,@object # @.str.8 .L.str.8: .asciz "Error reading file: %s\n" .size .L.str.8, 24 .type .L.str.9,@object # @.str.9 .L.str.9: .asciz "One position:\t%d\n" .size .L.str.9, 18 .type .L.str.10,@object # @.str.10 .L.str.10: .asciz "w+" .size .L.str.10, 3 .type .L.str.11,@object # @.str.11 .L.str.11: .asciz "%s\n" .size .L.str.11, 4 .type .L.str.13,@object # @.str.13 .L.str.13: .asciz "INV_DICT_SIZE:\t%u\nINV_DICT_WIDTH:\t%u\nINV_DICT_WIDTH_PAD:\t%u\nN_BLOCKS:\t%u\nBLOCK_SIZE:\t%d\n" .size .L.str.13, 89 .type .L.str.14,@object # @.str.14 .L.str.14: .asciz "%s/cu_query_report.txt" .size .L.str.14, 23 .type .L.str.15,@object # @.str.15 .L.str.15: .asciz "Err: %d\n" .size .L.str.15, 9 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z11queryKernelPjjjS_jS_jj" .size 
.L__unnamed_1, 27 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr.2,@object # @str.2 .section .rodata.str1.1,"aMS",@progbits,1 .Lstr.2: .asciz "ERROR!\t fscanf did not fill all arguments" .size .Lstr.2, 42 .type .Lstr.3,@object # @str.3 .Lstr.3: .asciz "Ended! :)" .size .Lstr.3, 10 .type .Lstr.4,@object # @str.4 .Lstr.4: .asciz "Requires arguments: <files file> <dictionary input file> <query input file> <output directory>" .size .Lstr.4, 95 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z26__device_stub__queryKernelPjjjS_jS_jj .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym files .addrsig_sym query .addrsig_sym query_ones .addrsig_sym matches .addrsig_sym _Z11queryKernelPjjjS_jS_jj .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_000672ac_00000000-6_cu-backup.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2076: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2076: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "r" .LC1: .string "Error while opening %s\n" .LC2: .string "%u " .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC3: .string "ERROR!\t fscanf did not fill all arguments\n" .section .rodata.str1.1 .LC4: .string "%u\n" .text .globl _Z19load_inv_dictionaryPc .type _Z19load_inv_dictionaryPc, @function _Z19load_inv_dictionaryPc: .LFB2067: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $8, %rsp .cfi_def_cfa_offset 64 movq %rdi, %rbx leaq .LC0(%rip), %rsi call fopen@PLT movq %rax, %r12 testq %rax, %rax je .L12 .L4: movl $2624, %ebp leaq .LC2(%rip), %r13 leaq .LC3(%rip), %r14 leaq .LC4(%rip), %r15 jmp .L5 .L12: movq %rbx, %rdx leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L4 .L6: addq $4, %rbx cmpq %rbp, %rbx je .L13 .L7: movq %rbx, %rdx addq inv_dictionary(%rip), %rdx movq %r13, %rsi movq %r12, %rdi movl $0, %eax call __isoc23_fscanf@PLT cmpl $1, %eax je .L6 movq %r14, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L6 .L13: movq %rbp, %rdx addq inv_dictionary(%rip), %rdx movq %r15, %rsi movq %r12, %rdi movl $0, %eax call __isoc23_fscanf@PLT cmpl $1, %eax jne .L14 .L8: addq $3072, %rbp cmpq $30722624, %rbp je .L9 .L5: leaq -2624(%rbp), 
%rbx jmp .L7 .L14: movq %r14, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L8 .L9: movq %r12, %rdi call fclose@PLT addq $8, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2067: .size _Z19load_inv_dictionaryPc, .-_Z19load_inv_dictionaryPc .section .rodata.str1.1 .LC5: .string "%c" .text .globl _Z10load_queryPc .type _Z10load_queryPc, @function _Z10load_queryPc: .LFB2068: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 movq %rdi, %rbx leaq .LC0(%rip), %rsi call fopen@PLT movq %rax, %rbp testq %rax, %rax je .L21 .L16: leaq query(%rip), %rbx leaq 10000(%rbx), %r13 leaq .LC5(%rip), %r12 leaq .LC3(%rip), %r14 jmp .L18 .L21: movq %rbx, %rdx leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L16 .L17: addq $1, %rbx cmpq %r13, %rbx je .L22 .L18: movq %rbx, %rdx movq %r12, %rsi movq %rbp, %rdi movl $0, %eax call __isoc23_fscanf@PLT cmpl $1, %eax je .L17 movq %r14, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L17 .L22: movq %rbp, %rdi call fclose@PLT popq %rbx .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2068: .size _Z10load_queryPc, .-_Z10load_queryPc .section .rodata.str1.1 .LC6: .string "Match %d:\t%u\n" .text .globl _Z12printMatchesv .type _Z12printMatchesv, @function _Z12printMatchesv: .LFB2069: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 
32 .cfi_offset 3, -32 movl $0, %ebx leaq matches(%rip), %r12 leaq .LC6(%rip), %rbp .L24: movl (%r12,%rbx,4), %ecx movl %ebx, %edx movq %rbp, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addq $1, %rbx cmpq $21000, %rbx jne .L24 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2069: .size _Z12printMatchesv, .-_Z12printMatchesv .section .rodata.str1.1 .LC7: .string "%s" .LC8: .string "Error reading file: %s\n" .text .globl _Z10load_filesPc .type _Z10load_filesPc, @function _Z10load_filesPc: .LFB2070: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $8, %rsp .cfi_def_cfa_offset 64 movq %rdi, %r14 leaq .LC0(%rip), %rsi call fopen@PLT movq %rax, %rbp testq %rax, %rax je .L33 .L28: leaq files(%rip), %rbx leaq 2100000(%rbx), %r13 leaq .LC7(%rip), %r12 leaq .LC8(%rip), %r15 jmp .L30 .L33: movq %r14, %rdx leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L28 .L29: addq $100, %rbx cmpq %r13, %rbx je .L34 .L30: movq %rbx, %rdx movq %r12, %rsi movq %rbp, %rdi movl $0, %eax call __isoc23_fscanf@PLT cmpl $1, %eax je .L29 movq %r14, %rdx movq %r15, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L29 .L34: movq %rbp, %rdi call fclose@PLT addq $8, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2070: .size _Z10load_filesPc, .-_Z10load_filesPc .section .rodata.str1.1 .LC9: .string "One position:\t%d\n" .text .globl _Z13findQueryOnesPc .type _Z13findQueryOnesPc, @function 
_Z13findQueryOnesPc: .LFB2071: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 movq %rdi, %rbp movl $0, %ebx movl $0, %r12d leaq query_ones(%rip), %r14 leaq .LC9(%rip), %r13 jmp .L37 .L36: addq $1, %rbx cmpq $10000, %rbx je .L40 .L37: cmpb $49, 0(%rbp,%rbx) jne .L36 movl %ebx, %edx movl %r12d, %eax movl %ebx, (%r14,%rax,4) movq %r13, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addl $1, %r12d jmp .L36 .L40: movl %r12d, %eax popq %rbx .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2071: .size _Z13findQueryOnesPc, .-_Z13findQueryOnesPc .section .rodata.str1.1 .LC10: .string "w+" .LC11: .string "%s\n" .text .globl _Z11reportQueryPc .type _Z11reportQueryPc, @function _Z11reportQueryPc: .LFB2072: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 leaq .LC10(%rip), %rsi call fopen@PLT movq %rax, %r12 leaq matches(%rip), %rbx leaq 84000(%rbx), %rbp leaq files(%rip), %r14 leaq .LC11(%rip), %r13 jmp .L43 .L42: addq $4, %rbx cmpq %rbp, %rbx je .L46 .L43: movl (%rbx), %eax testl %eax, %eax je .L42 leal -1(%rax), %eax leaq (%rax,%rax,4), %rax leaq (%rax,%rax,4), %rax leaq (%r14,%rax,4), %rcx movq %r13, %rdx movl $2, %esi movq %r12, %rdi movl $0, %eax call __fprintf_chk@PLT jmp .L42 .L46: movq %r12, %rdi call fclose@PLT popq %rbx .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 
.cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2072: .size _Z11reportQueryPc, .-_Z11reportQueryPc .globl _Z40__device_stub__Z11queryKernelPjjjS_jS_jjPjjjS_jS_jj .type _Z40__device_stub__Z11queryKernelPjjjS_jS_jjPjjjS_jS_jj, @function _Z40__device_stub__Z11queryKernelPjjjS_jS_jjPjjjS_jS_jj: .LFB2098: .cfi_startproc endbr64 subq $200, %rsp .cfi_def_cfa_offset 208 movl %esi, 28(%rsp) movl %edx, 24(%rsp) movl %r8d, 20(%rsp) movq %r9, 8(%rsp) movq %fs:40, %rax movq %rax, 184(%rsp) xorl %eax, %eax movq %rdi, 32(%rsp) leaq 32(%rsp), %rax movq %rax, 112(%rsp) leaq 28(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) movq %rcx, 40(%rsp) leaq 40(%rsp), %rax movq %rax, 136(%rsp) leaq 20(%rsp), %rax movq %rax, 144(%rsp) leaq 8(%rsp), %rax movq %rax, 152(%rsp) leaq 208(%rsp), %rax movq %rax, 160(%rsp) leaq 216(%rsp), %rax movq %rax, 168(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L51 .L47: movq 184(%rsp), %rax subq %fs:40, %rax jne .L52 addq $200, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L51: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 216 pushq 56(%rsp) .cfi_def_cfa_offset 224 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq _Z11queryKernelPjjjS_jS_jj(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 208 jmp .L47 .L52: call __stack_chk_fail@PLT .cfi_endproc .LFE2098: .size _Z40__device_stub__Z11queryKernelPjjjS_jS_jjPjjjS_jS_jj, .-_Z40__device_stub__Z11queryKernelPjjjS_jS_jjPjjjS_jS_jj .globl _Z11queryKernelPjjjS_jS_jj .type _Z11queryKernelPjjjS_jS_jj, @function _Z11queryKernelPjjjS_jS_jj: .LFB2099: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movl 24(%rsp), %eax pushq %rax .cfi_def_cfa_offset 24 movl 24(%rsp), %eax pushq %rax 
.cfi_def_cfa_offset 32 call _Z40__device_stub__Z11queryKernelPjjjS_jS_jjPjjjS_jS_jj addq $24, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2099: .size _Z11queryKernelPjjjS_jS_jj, .-_Z11queryKernelPjjjS_jS_jj .section .rodata.str1.8 .align 8 .LC12: .string "Requires arguments: <files file> <dictionary input file> <query input file> <output directory>\n" .align 8 .LC13: .string "INV_DICT_SIZE:\t%u\nINV_DICT_WIDTH:\t%u\nINV_DICT_WIDTH_PAD:\t%u\nN_BLOCKS:\t%u\nBLOCK_SIZE:\t%d\n" .section .rodata.str1.1 .LC14: .string "%s/cu_query_report.txt" .LC15: .string "Err: %d\n" .LC16: .string "Ended! :)\n" .text .globl main .type main, @function main: .LFB2073: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 subq $608, %rsp .cfi_def_cfa_offset 656 movq %fs:40, %rax movq %rax, 600(%rsp) xorl %eax, %eax cmpl $5, %edi je .L56 leaq .LC12(%rip), %rsi movl $2, %edi call __printf_chk@PLT movl $1, %eax .L55: movq 600(%rsp), %rdx subq %fs:40, %rdx jne .L61 addq $608, %rsp .cfi_remember_state .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .L56: .cfi_restore_state movq %rsi, %rbx leaq 160(%rsp), %rdi movq 8(%rsi), %r8 leaq .LC7(%rip), %rbp movq %rbp, %rcx movl $100, %edx movl $2, %esi movl $0, %eax call __sprintf_chk@PLT leaq 48(%rsp), %r13 movq 16(%rbx), %r8 movq %rbp, %rcx movl $100, %edx movl $2, %esi movq %r13, %rdi movl $0, %eax call __sprintf_chk@PLT leaq 272(%rsp), %r12 movq 24(%rbx), %r8 movq %rbp, %rcx movl $100, %edx movl $2, %esi movq %r12, %rdi movl $0, %eax call __sprintf_chk@PLT leaq 384(%rsp), %r14 movq 32(%rbx), %r8 movq %rbp, %rcx movl $100, %edx movl $2, %esi movq %r14, %rdi movl $0, %eax 
call __sprintf_chk@PLT subq $8, %rsp .cfi_def_cfa_offset 664 pushq $64 .cfi_def_cfa_offset 672 movl $11, %r9d movl $768, %r8d movl $657, %ecx movl $7680000, %edx leaq .LC13(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq 512(%rsp), %rdi movq %r14, %r8 leaq .LC14(%rip), %rcx movl $100, %edx movl $2, %esi movl $0, %eax call __sprintf_chk@PLT movl $30720000, %edi call malloc@PLT movq %rax, inv_dictionary(%rip) addq $16, %rsp .cfi_def_cfa_offset 656 movq %r13, %rdi call _Z19load_inv_dictionaryPc movq %r12, %rdi call _Z10load_queryPc leaq query(%rip), %rdi call _Z13findQueryOnesPc movl %eax, %ebp movq %rsp, %rdi movl $30720000, %esi call cudaMalloc@PLT movl %ebp, %ebx salq $2, %rbx leaq 16(%rsp), %rdi movq %rbx, %rsi call cudaMalloc@PLT leaq 8(%rsp), %rdi movl $84000, %esi call cudaMalloc@PLT movl $84000, %edx movl $0, %esi movq 8(%rsp), %rdi call cudaMemset@PLT movl $1, %ecx movl $30720000, %edx movq inv_dictionary(%rip), %rsi movq (%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movq %rbx, %rdx leaq query_ones(%rip), %rsi movq 16(%rsp), %rdi call cudaMemcpy@PLT movl %eax, %edx leaq .LC15(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $64, 36(%rsp) movl $1, 40(%rsp) movl $11, 24(%rsp) movl $1, 28(%rsp) movl $0, %r9d movl $0, %r8d movq 36(%rsp), %rdx movl $1, %ecx movq 24(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L62 .L58: movl $2, %ecx movl $84000, %edx movq 8(%rsp), %rsi leaq matches(%rip), %rdi call cudaMemcpy@PLT leaq 160(%rsp), %rdi call _Z10load_filesPc leaq 496(%rsp), %rdi call _Z11reportQueryPc movq (%rsp), %rdi call cudaFree@PLT movq 16(%rsp), %rdi call cudaFree@PLT movq 8(%rsp), %rdi call cudaFree@PLT movq inv_dictionary(%rip), %rdi call free@PLT leaq .LC16(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $0, %eax jmp .L55 .L62: pushq $704 .cfi_def_cfa_offset 664 pushq $4 .cfi_def_cfa_offset 672 movq 24(%rsp), %r9 movl %ebp, %r8d movq 32(%rsp), %rcx movl $768, %edx 
movl $657, %esi movq 16(%rsp), %rdi call _Z40__device_stub__Z11queryKernelPjjjS_jS_jjPjjjS_jS_jj addq $16, %rsp .cfi_def_cfa_offset 656 jmp .L58 .L61: call __stack_chk_fail@PLT .cfi_endproc .LFE2073: .size main, .-main .section .rodata.str1.1 .LC17: .string "_Z11queryKernelPjjjS_jS_jj" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2101: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC17(%rip), %rdx movq %rdx, %rcx leaq _Z11queryKernelPjjjS_jS_jj(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2101: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .globl matches .bss .align 32 .type matches, @object .size matches, 84000 matches: .zero 84000 .globl query_ones .align 32 .type query_ones, @object .size query_ones, 40000 query_ones: .zero 40000 .globl query .align 32 .type query, @object .size query, 10000 query: .zero 10000 .globl files .align 32 .type files, @object .size files, 2100000 files: .zero 2100000 .globl inv_dictionary .align 8 .type inv_dictionary, @object .size inv_dictionary, 8 inv_dictionary: .zero 8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 
13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "cu-backup.hip" .globl _Z26__device_stub__queryKernelPjjjS_jS_jj # -- Begin function _Z26__device_stub__queryKernelPjjjS_jS_jj .p2align 4, 0x90 .type _Z26__device_stub__queryKernelPjjjS_jS_jj,@function _Z26__device_stub__queryKernelPjjjS_jS_jj: # @_Z26__device_stub__queryKernelPjjjS_jS_jj .cfi_startproc # %bb.0: subq $168, %rsp .cfi_def_cfa_offset 176 movq %rdi, 88(%rsp) movl %esi, 20(%rsp) movl %edx, 16(%rsp) movq %rcx, 80(%rsp) movl %r8d, 12(%rsp) movq %r9, 72(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 20(%rsp), %rax movq %rax, 104(%rsp) leaq 16(%rsp), %rax movq %rax, 112(%rsp) leaq 80(%rsp), %rax movq %rax, 120(%rsp) leaq 12(%rsp), %rax movq %rax, 128(%rsp) leaq 72(%rsp), %rax movq %rax, 136(%rsp) leaq 176(%rsp), %rax movq %rax, 144(%rsp) leaq 184(%rsp), %rax movq %rax, 152(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z11queryKernelPjjjS_jS_jj, %edi pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $184, %rsp .cfi_adjust_cfa_offset -184 retq .Lfunc_end0: .size _Z26__device_stub__queryKernelPjjjS_jS_jj, .Lfunc_end0-_Z26__device_stub__queryKernelPjjjS_jS_jj .cfi_endproc # -- End function .globl _Z19load_inv_dictionaryPc # -- Begin function _Z19load_inv_dictionaryPc .p2align 4, 0x90 .type _Z19load_inv_dictionaryPc,@function _Z19load_inv_dictionaryPc: # @_Z19load_inv_dictionaryPc .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r12 .cfi_def_cfa_offset 32 pushq %rbx .cfi_def_cfa_offset 40 pushq %rax .cfi_def_cfa_offset 48 .cfi_offset %rbx, -40 .cfi_offset %r12, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movq %rdi, %r14 movl $.L.str, %esi callq fopen movq %rax, %rbx testq %rax, %rax jne .LBB1_2 # %bb.1: movl $.L.str.1, %edi movq %r14, %rsi xorl %eax, %eax callq 
printf .LBB1_2: # %.preheader.preheader xorl %r14d, %r14d xorl %r15d, %r15d jmp .LBB1_3 .p2align 4, 0x90 .LBB1_9: # in Loop: Header=BB1_3 Depth=1 incq %r15 addq $3072, %r14 # imm = 0xC00 cmpq $10000, %r15 # imm = 0x2710 je .LBB1_10 .LBB1_3: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB1_4 Depth 2 xorl %r12d, %r12d jmp .LBB1_4 .p2align 4, 0x90 .LBB1_6: # in Loop: Header=BB1_4 Depth=2 addq $4, %r12 cmpq $2624, %r12 # imm = 0xA40 je .LBB1_7 .LBB1_4: # Parent Loop BB1_3 Depth=1 # => This Inner Loop Header: Depth=2 movq inv_dictionary(%rip), %rdx addq %r14, %rdx addq %r12, %rdx movl $.L.str.2, %esi movq %rbx, %rdi xorl %eax, %eax callq __isoc23_fscanf cmpl $1, %eax je .LBB1_6 # %bb.5: # in Loop: Header=BB1_4 Depth=2 movl $.Lstr.2, %edi callq puts@PLT jmp .LBB1_6 .p2align 4, 0x90 .LBB1_7: # in Loop: Header=BB1_3 Depth=1 movq inv_dictionary(%rip), %rax leaq (%r15,%r15,2), %rcx shlq $10, %rcx leaq (%rax,%rcx), %rdx addq $2624, %rdx # imm = 0xA40 movl $.L.str.4, %esi movq %rbx, %rdi xorl %eax, %eax callq __isoc23_fscanf cmpl $1, %eax je .LBB1_9 # %bb.8: # in Loop: Header=BB1_3 Depth=1 movl $.Lstr.2, %edi callq puts@PLT jmp .LBB1_9 .LBB1_10: movq %rbx, %rdi addq $8, %rsp .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 jmp fclose # TAILCALL .Lfunc_end1: .size _Z19load_inv_dictionaryPc, .Lfunc_end1-_Z19load_inv_dictionaryPc .cfi_endproc # -- End function .globl _Z10load_queryPc # -- Begin function _Z10load_queryPc .p2align 4, 0x90 .type _Z10load_queryPc,@function _Z10load_queryPc: # @_Z10load_queryPc .cfi_startproc # %bb.0: pushq %r14 .cfi_def_cfa_offset 16 pushq %rbx .cfi_def_cfa_offset 24 pushq %rax .cfi_def_cfa_offset 32 .cfi_offset %rbx, -24 .cfi_offset %r14, -16 movq %rdi, %r14 movl $.L.str, %esi callq fopen movq %rax, %rbx testq %rax, %rax jne .LBB2_1 # %bb.6: movl $.L.str.1, %edi movq %r14, %rsi xorl %eax, %eax callq printf .LBB2_1: # %.preheader xorl 
%r14d, %r14d jmp .LBB2_2 .p2align 4, 0x90 .LBB2_4: # in Loop: Header=BB2_2 Depth=1 incq %r14 cmpq $10000, %r14 # imm = 0x2710 je .LBB2_5 .LBB2_2: # =>This Inner Loop Header: Depth=1 leaq query(%r14), %rdx movl $.L.str.5, %esi movq %rbx, %rdi xorl %eax, %eax callq __isoc23_fscanf cmpl $1, %eax je .LBB2_4 # %bb.3: # in Loop: Header=BB2_2 Depth=1 movl $.Lstr.2, %edi callq puts@PLT jmp .LBB2_4 .LBB2_5: movq %rbx, %rdi addq $8, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 jmp fclose # TAILCALL .Lfunc_end2: .size _Z10load_queryPc, .Lfunc_end2-_Z10load_queryPc .cfi_endproc # -- End function .globl _Z12printMatchesv # -- Begin function _Z12printMatchesv .p2align 4, 0x90 .type _Z12printMatchesv,@function _Z12printMatchesv: # @_Z12printMatchesv .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset %rbx, -16 xorl %ebx, %ebx .p2align 4, 0x90 .LBB3_1: # =>This Inner Loop Header: Depth=1 movl matches(,%rbx,4), %edx movl $.L.str.6, %edi movl %ebx, %esi xorl %eax, %eax callq printf incq %rbx cmpq $21000, %rbx # imm = 0x5208 jne .LBB3_1 # %bb.2: popq %rbx .cfi_def_cfa_offset 8 retq .Lfunc_end3: .size _Z12printMatchesv, .Lfunc_end3-_Z12printMatchesv .cfi_endproc # -- End function .globl _Z10load_filesPc # -- Begin function _Z10load_filesPc .p2align 4, 0x90 .type _Z10load_filesPc,@function _Z10load_filesPc: # @_Z10load_filesPc .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movq %rdi, %rbx movl $.L.str, %esi callq fopen movq %rax, %r14 testq %rax, %rax jne .LBB4_1 # %bb.6: movl $.L.str.1, %edi movq %rbx, %rsi xorl %eax, %eax callq printf .LBB4_1: # %.preheader xorl %r15d, %r15d jmp .LBB4_2 .p2align 4, 0x90 .LBB4_4: # in Loop: Header=BB4_2 Depth=1 addq $100, %r15 cmpq $2100000, %r15 # imm = 0x200B20 je .LBB4_5 .LBB4_2: # =>This Inner Loop Header: Depth=1 leaq files(%r15), %rdx 
movl $.L.str.7, %esi movq %r14, %rdi xorl %eax, %eax callq __isoc23_fscanf cmpl $1, %eax je .LBB4_4 # %bb.3: # in Loop: Header=BB4_2 Depth=1 movl $.L.str.8, %edi movq %rbx, %rsi xorl %eax, %eax callq printf jmp .LBB4_4 .LBB4_5: movq %r14, %rdi popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 jmp fclose # TAILCALL .Lfunc_end4: .size _Z10load_filesPc, .Lfunc_end4-_Z10load_filesPc .cfi_endproc # -- End function .globl _Z13findQueryOnesPc # -- Begin function _Z13findQueryOnesPc .p2align 4, 0x90 .type _Z13findQueryOnesPc,@function _Z13findQueryOnesPc: # @_Z13findQueryOnesPc .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %rbp, -16 movq %rdi, %rbx xorl %r14d, %r14d xorl %ebp, %ebp jmp .LBB5_1 .p2align 4, 0x90 .LBB5_3: # in Loop: Header=BB5_1 Depth=1 incq %r14 cmpq $10000, %r14 # imm = 0x2710 je .LBB5_4 .LBB5_1: # =>This Inner Loop Header: Depth=1 cmpb $49, (%rbx,%r14) jne .LBB5_3 # %bb.2: # in Loop: Header=BB5_1 Depth=1 movl %ebp, %eax movl %r14d, query_ones(,%rax,4) movl $.L.str.9, %edi movl %r14d, %esi xorl %eax, %eax callq printf incl %ebp jmp .LBB5_3 .LBB5_4: movl %ebp, %eax popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end5: .size _Z13findQueryOnesPc, .Lfunc_end5-_Z13findQueryOnesPc .cfi_endproc # -- End function .globl _Z11reportQueryPc # -- Begin function _Z11reportQueryPc .p2align 4, 0x90 .type _Z11reportQueryPc,@function _Z11reportQueryPc: # @_Z11reportQueryPc .cfi_startproc # %bb.0: pushq %r14 .cfi_def_cfa_offset 16 pushq %rbx .cfi_def_cfa_offset 24 pushq %rax .cfi_def_cfa_offset 32 .cfi_offset %rbx, -24 .cfi_offset %r14, -16 movl $.L.str.10, %esi callq fopen movq %rax, %rbx movq $-84000, %r14 # imm = 0xFFFEB7E0 jmp .LBB6_1 .p2align 4, 0x90 .LBB6_3: # in Loop: Header=BB6_1 Depth=1 addq $4, %r14 je .LBB6_4 .LBB6_1: # 
=>This Inner Loop Header: Depth=1 movl matches+84000(%r14), %eax testl %eax, %eax je .LBB6_3 # %bb.2: # in Loop: Header=BB6_1 Depth=1 decl %eax imulq $100, %rax, %rax leaq files(%rax), %rdx movl $.L.str.11, %esi movq %rbx, %rdi xorl %eax, %eax callq fprintf jmp .LBB6_3 .LBB6_4: movq %rbx, %rdi addq $8, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 jmp fclose # TAILCALL .Lfunc_end6: .size _Z11reportQueryPc, .Lfunc_end6-_Z11reportQueryPc .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %rbx .cfi_def_cfa_offset 40 subq $744, %rsp # imm = 0x2E8 .cfi_def_cfa_offset 784 .cfi_offset %rbx, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 cmpl $5, %edi jne .LBB7_1 # %bb.2: movq 8(%rsi), %rax leaq 192(%rsp), %rdi movq %rsi, %r15 movq %rax, %rsi callq strcpy@PLT movq 16(%r15), %rsi leaq 640(%rsp), %rbx movq %rbx, %rdi callq strcpy@PLT movq 24(%r15), %rsi leaq 416(%rsp), %r14 movq %r14, %rdi callq strcpy@PLT movq 32(%r15), %rsi leaq 528(%rsp), %r15 movq %r15, %rdi callq strcpy@PLT movl $.L.str.13, %edi movl $7680000, %esi # imm = 0x753000 movl $657, %edx # imm = 0x291 movl $768, %ecx # imm = 0x300 movl $11, %r8d movl $64, %r9d xorl %eax, %eax callq printf leaq 304(%rsp), %rdi movl $.L.str.14, %esi movq %r15, %rdx xorl %eax, %eax callq sprintf movl $30720000, %edi # imm = 0x1D4C000 callq malloc movq %rax, inv_dictionary(%rip) movq %rbx, %rdi callq _Z19load_inv_dictionaryPc movl $.L.str, %esi movq %r14, %rdi callq fopen movq %rax, %rbx testq %rax, %rax jne .LBB7_4 # %bb.3: leaq 416(%rsp), %rsi movl $.L.str.1, %edi xorl %eax, %eax callq printf .LBB7_4: # %.preheader30 xorl %r14d, %r14d jmp .LBB7_5 .p2align 4, 0x90 .LBB7_7: # in Loop: Header=BB7_5 Depth=1 incq %r14 cmpq $10000, %r14 # imm = 0x2710 je .LBB7_8 
.LBB7_5: # =>This Inner Loop Header: Depth=1 leaq query(%r14), %rdx movl $.L.str.5, %esi movq %rbx, %rdi xorl %eax, %eax callq __isoc23_fscanf cmpl $1, %eax je .LBB7_7 # %bb.6: # in Loop: Header=BB7_5 Depth=1 movl $.Lstr.2, %edi callq puts@PLT jmp .LBB7_7 .LBB7_1: movl $.Lstr.4, %edi callq puts@PLT movl $1, %eax jmp .LBB7_25 .LBB7_8: # %_Z10load_queryPc.exit movq %rbx, %rdi callq fclose xorl %ebx, %ebx xorl %ebp, %ebp jmp .LBB7_9 .p2align 4, 0x90 .LBB7_11: # in Loop: Header=BB7_9 Depth=1 incq %rbx cmpq $10000, %rbx # imm = 0x2710 je .LBB7_12 .LBB7_9: # =>This Inner Loop Header: Depth=1 cmpb $49, query(%rbx) jne .LBB7_11 # %bb.10: # in Loop: Header=BB7_9 Depth=1 movl %ebp, %eax movl %ebx, query_ones(,%rax,4) movl $.L.str.9, %edi movl %ebx, %esi xorl %eax, %eax callq printf incl %ebp jmp .LBB7_11 .LBB7_12: # %_Z13findQueryOnesPc.exit leaq 24(%rsp), %rdi movl $30720000, %esi # imm = 0x1D4C000 callq hipMalloc movl %ebp, %ebx shlq $2, %rbx leaq 16(%rsp), %rdi movq %rbx, %rsi callq hipMalloc leaq 8(%rsp), %rdi movl $84000, %esi # imm = 0x14820 callq hipMalloc movq 8(%rsp), %rdi movl $84000, %edx # imm = 0x14820 xorl %esi, %esi callq hipMemset movq 24(%rsp), %rdi movq inv_dictionary(%rip), %rsi movl $30720000, %edx # imm = 0x1D4C000 movl $1, %ecx callq hipMemcpy movq 16(%rsp), %rdi movl $query_ones, %esi movq %rbx, %rdx movl $1, %ecx callq hipMemcpy movl $.L.str.15, %edi movl %eax, %esi xorl %eax, %eax callq printf movabsq $4294967307, %rdi # imm = 0x10000000B leaq 53(%rdi), %rdx movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB7_14 # %bb.13: movq 24(%rsp), %rax movq 16(%rsp), %rcx movq 8(%rsp), %rdx movq %rax, 120(%rsp) movl $657, 52(%rsp) # imm = 0x291 movl $768, 48(%rsp) # imm = 0x300 movq %rcx, 112(%rsp) movl %ebp, 44(%rsp) movq %rdx, 104(%rsp) movl $4, 40(%rsp) movl $704, 36(%rsp) # imm = 0x2C0 leaq 120(%rsp), %rax movq %rax, 128(%rsp) leaq 52(%rsp), %rax movq %rax, 136(%rsp) leaq 48(%rsp), %rax 
movq %rax, 144(%rsp) leaq 112(%rsp), %rax movq %rax, 152(%rsp) leaq 44(%rsp), %rax movq %rax, 160(%rsp) leaq 104(%rsp), %rax movq %rax, 168(%rsp) leaq 40(%rsp), %rax movq %rax, 176(%rsp) leaq 36(%rsp), %rax movq %rax, 184(%rsp) leaq 88(%rsp), %rdi leaq 72(%rsp), %rsi leaq 64(%rsp), %rdx leaq 56(%rsp), %rcx callq __hipPopCallConfiguration movq 88(%rsp), %rsi movl 96(%rsp), %edx movq 72(%rsp), %rcx movl 80(%rsp), %r8d leaq 128(%rsp), %r9 movl $_Z11queryKernelPjjjS_jS_jj, %edi pushq 56(%rsp) .cfi_adjust_cfa_offset 8 pushq 72(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB7_14: movq 8(%rsp), %rsi movl $matches, %edi movl $84000, %edx # imm = 0x14820 movl $2, %ecx callq hipMemcpy leaq 192(%rsp), %rdi movl $.L.str, %esi callq fopen movq %rax, %rbx testq %rax, %rax jne .LBB7_16 # %bb.15: leaq 192(%rsp), %rsi movl $.L.str.1, %edi xorl %eax, %eax callq printf .LBB7_16: # %.preheader leaq 192(%rsp), %r14 xorl %r15d, %r15d jmp .LBB7_17 .p2align 4, 0x90 .LBB7_19: # in Loop: Header=BB7_17 Depth=1 addq $100, %r15 cmpq $2100000, %r15 # imm = 0x200B20 je .LBB7_20 .LBB7_17: # =>This Inner Loop Header: Depth=1 leaq files(%r15), %rdx movl $.L.str.7, %esi movq %rbx, %rdi xorl %eax, %eax callq __isoc23_fscanf cmpl $1, %eax je .LBB7_19 # %bb.18: # in Loop: Header=BB7_17 Depth=1 movl $.L.str.8, %edi movq %r14, %rsi xorl %eax, %eax callq printf jmp .LBB7_19 .LBB7_20: # %_Z10load_filesPc.exit movq %rbx, %rdi callq fclose leaq 304(%rsp), %rdi movl $.L.str.10, %esi callq fopen movq %rax, %rbx movq $-84000, %r14 # imm = 0xFFFEB7E0 jmp .LBB7_21 .p2align 4, 0x90 .LBB7_23: # in Loop: Header=BB7_21 Depth=1 addq $4, %r14 je .LBB7_24 .LBB7_21: # =>This Inner Loop Header: Depth=1 movl matches+84000(%r14), %eax testl %eax, %eax je .LBB7_23 # %bb.22: # in Loop: Header=BB7_21 Depth=1 decl %eax imulq $100, %rax, %rax leaq files(%rax), %rdx movl $.L.str.11, %esi movq %rbx, %rdi xorl %eax, %eax callq fprintf jmp .LBB7_23 .LBB7_24: # 
%_Z11reportQueryPc.exit movq %rbx, %rdi callq fclose movq 24(%rsp), %rdi callq hipFree movq 16(%rsp), %rdi callq hipFree movq 8(%rsp), %rdi callq hipFree movq inv_dictionary(%rip), %rdi callq free movl $.Lstr.3, %edi callq puts@PLT xorl %eax, %eax .LBB7_25: addq $744, %rsp # imm = 0x2E8 .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end7: .size main, .Lfunc_end7-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB8_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB8_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z11queryKernelPjjjS_jS_jj, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end8: .size __hip_module_ctor, .Lfunc_end8-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB9_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB9_2: retq .Lfunc_end9: .size __hip_module_dtor, .Lfunc_end9-__hip_module_dtor .cfi_endproc # -- End function .type inv_dictionary,@object # @inv_dictionary .bss .globl inv_dictionary .p2align 3, 0x0 inv_dictionary: .quad 0 .size inv_dictionary, 8 .type files,@object # @files .globl files .p2align 4, 0x0 files: .zero 
2100000 .size files, 2100000 .type query,@object # @query .globl query .p2align 4, 0x0 query: .zero 10000 .size query, 10000 .type query_ones,@object # @query_ones .globl query_ones .p2align 4, 0x0 query_ones: .zero 40000 .size query_ones, 40000 .type matches,@object # @matches .globl matches .p2align 4, 0x0 matches: .zero 84000 .size matches, 84000 .type _Z11queryKernelPjjjS_jS_jj,@object # @_Z11queryKernelPjjjS_jS_jj .section .rodata,"a",@progbits .globl _Z11queryKernelPjjjS_jS_jj .p2align 3, 0x0 _Z11queryKernelPjjjS_jS_jj: .quad _Z26__device_stub__queryKernelPjjjS_jS_jj .size _Z11queryKernelPjjjS_jS_jj, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "r" .size .L.str, 2 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "Error while opening %s\n" .size .L.str.1, 24 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "%u " .size .L.str.2, 4 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz "%u\n" .size .L.str.4, 4 .type .L.str.5,@object # @.str.5 .L.str.5: .asciz "%c" .size .L.str.5, 3 .type .L.str.6,@object # @.str.6 .L.str.6: .asciz "Match %d:\t%u\n" .size .L.str.6, 14 .type .L.str.7,@object # @.str.7 .L.str.7: .asciz "%s" .size .L.str.7, 3 .type .L.str.8,@object # @.str.8 .L.str.8: .asciz "Error reading file: %s\n" .size .L.str.8, 24 .type .L.str.9,@object # @.str.9 .L.str.9: .asciz "One position:\t%d\n" .size .L.str.9, 18 .type .L.str.10,@object # @.str.10 .L.str.10: .asciz "w+" .size .L.str.10, 3 .type .L.str.11,@object # @.str.11 .L.str.11: .asciz "%s\n" .size .L.str.11, 4 .type .L.str.13,@object # @.str.13 .L.str.13: .asciz "INV_DICT_SIZE:\t%u\nINV_DICT_WIDTH:\t%u\nINV_DICT_WIDTH_PAD:\t%u\nN_BLOCKS:\t%u\nBLOCK_SIZE:\t%d\n" .size .L.str.13, 89 .type .L.str.14,@object # @.str.14 .L.str.14: .asciz "%s/cu_query_report.txt" .size .L.str.14, 23 .type .L.str.15,@object # @.str.15 .L.str.15: .asciz "Err: %d\n" .size .L.str.15, 9 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z11queryKernelPjjjS_jS_jj" .size 
.L__unnamed_1, 27 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr.2,@object # @str.2 .section .rodata.str1.1,"aMS",@progbits,1 .Lstr.2: .asciz "ERROR!\t fscanf did not fill all arguments" .size .Lstr.2, 42 .type .Lstr.3,@object # @str.3 .Lstr.3: .asciz "Ended! :)" .size .Lstr.3, 10 .type .Lstr.4,@object # @str.4 .Lstr.4: .asciz "Requires arguments: <files file> <dictionary input file> <query input file> <output directory>" .size .Lstr.4, 95 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z26__device_stub__queryKernelPjjjS_jS_jj .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym files .addrsig_sym query .addrsig_sym query_ones .addrsig_sym matches .addrsig_sym _Z11queryKernelPjjjS_jS_jj .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
//#pragma comment (lib, "cublas.lib") //#include "stdio.h" //#include <cuda.h> //using namespace std; //#include <ctime> //#include "cuda_runtime.h" //#include "curand_kernel.h" //#include "device_launch_parameters.h" //#include <stdio.h> //#include <stdlib.h> // //#include <string> //#include <iomanip> //#include <time.h> //#include <iostream> //#include <cmath> //#include <math.h> // //#define TRAIN_NUM 60000 //#define TEST_NUM 10000 //#define ROW 28 //#define COL 28 //#define CONV_SIZE 24 //#define POOL_SIZE 12 //#define FC1_SIZE 5 //#define FC2_SIZE 10 //#define CONV_W_SIZE 5 //#define CONV_W_NUM 6 // //int correct_cnt; //float avg_error; //float max_acc; // //float alpha = 0.2; //int epochs = 5; //int minibatch = 1; // //float train_image[TRAIN_NUM][ROW][COL]; //int train_label[TRAIN_NUM]; //float test_image[TEST_NUM][ROW][COL]; //int test_label[TEST_NUM]; // //float conv_w[CONV_W_NUM][CONV_W_SIZE][CONV_W_SIZE]; //float conv_b[CONV_W_NUM]; //float fc1_b[FC1_SIZE]; //float fc1_w[FC1_SIZE][CONV_W_NUM][POOL_SIZE][POOL_SIZE]; //float fc2_b[FC2_SIZE]; //float fc2_w[FC2_SIZE][FC1_SIZE]; // //float input[ROW][COL]; //float conv_z[CONV_W_NUM][CONV_SIZE][CONV_SIZE]; //float conv_a[CONV_W_NUM][CONV_SIZE][CONV_SIZE]; //int pool_pos[CONV_W_NUM][POOL_SIZE][POOL_SIZE]; //float pool[CONV_W_NUM][POOL_SIZE][POOL_SIZE]; //float fc1_z[FC1_SIZE]; //float fc1_a[FC1_SIZE]; //float fc2_z[FC2_SIZE]; //float fc2_a[FC2_SIZE]; //float output[FC2_SIZE]; //int answer[FC2_SIZE]; // //float conv_dw[CONV_W_NUM][CONV_W_SIZE][CONV_W_SIZE]; //float conv_db[CONV_W_NUM]; //float fc1_db[FC1_SIZE]; //float fc1_dw[FC1_SIZE][CONV_W_NUM][POOL_SIZE][POOL_SIZE]; //float fc2_db[FC2_SIZE]; //float fc2_dw[FC2_SIZE][FC1_SIZE]; //float C[FC2_SIZE]; //float fc2_delta[FC2_SIZE]; //float fc1_delta[FC1_SIZE]; //float conv_sigma_delta[CONV_W_NUM]; //float conv_delta[CONV_W_NUM][POOL_SIZE][POOL_SIZE]; // //int swap_endian(int val) //{ // unsigned char c1, c2, c3, c4; // c1 = val & 255; // c2 = (val >> 8) & 255; // 
c3 = (val >> 16) & 255; // c4 = (val >> 24) & 255; // return ((int)c1 << 24) + ((int)c2 << 16) + ((int)c3 << 8) + c4; //} //void load_data() //{ // FILE* f_images = fopen("D:\\\\Zufar\\\\CUDA-CNN\\\\CudaCNN2\\\\CudaCNN2\\\\data\\\\train-images.idx3-ubyte", "rb"); // FILE* f_labels = fopen("D:\\\\Zufar\\\\CUDA-CNN\\\\CudaCNN2\\\\CudaCNN2\\\\data\\\\train-labels.idx1-ubyte", "rb"); // // int tmp; // // int magic_num; // fread(&magic_num, sizeof(int), 1, f_images); // fread(&magic_num, sizeof(int), 1, f_labels); // // // printf("debug:%d\n",swap_endian(magic_num)); // // int train_size; // fread(&train_size, sizeof(int), 1, f_images); // fread(&train_size, sizeof(int), 1, f_labels); // train_size = swap_endian(train_size); // // // printf("debug:%d\n",swap_endian(train_size)); // // int rows, cols; // fread(&rows, sizeof(int), 1, f_images); // fread(&cols, sizeof(int), 1, f_images); // rows = swap_endian(rows); // cols = swap_endian(cols); // // // printf("debug:%d\n",swap_endian(rows)); // // printf("debug:%d\n",swap_endian(cols)); // // for (int i = 0;i < train_size;i++) // { // fread(&train_label[i], 1, 1, f_labels); // if (i % 1000 == 0) // printf("Training labels : Already read %5d labels\r", i); // // printf("%d:debug:%d\r",i,train_label[i]); // // system("pause"); // } // printf("Training labels : Already read %5d labels\n", train_size); // // for (int i = 0;i < train_size;i++) // { // for (int j = 0;j < rows;j++) // for (int k = 0;k < cols;k++) // { // tmp = 0; // fread(&tmp, 1, 1, f_images); // train_image[i][j][k] = tmp; // train_image[i][j][k] /= 255; // // printf("%d %d %d debug: %f\n",i,j,k,train_image[i][j][k]); // // system("pause"); // } // if (i % 1000 == 0) // printf("Training images : Already read %5d images\r", i); // } // printf("Training images : Already read %5d images\n", train_size); // // fclose(f_images); // fclose(f_labels); // // f_images = fopen("D:\\\\Zufar\\\\CUDA-CNN\\\\CudaCNN2\\\\CudaCNN2\\\\data\\\\t10k-images.idx3-ubyte", "rb"); // 
f_labels = fopen("D:\\\\Zufar\\\\CUDA-CNN\\\\CudaCNN2\\\\CudaCNN2\\\\data\\\\t10k-labels.idx1-ubyte", "rb"); // // fread(&magic_num, sizeof(int), 1, f_images); // fread(&magic_num, sizeof(int), 1, f_labels); // // int test_size; // fread(&test_size, sizeof(int), 1, f_images); // fread(&test_size, sizeof(int), 1, f_labels); // test_size = swap_endian(test_size); // // fread(&rows, sizeof(int), 1, f_images); // fread(&cols, sizeof(int), 1, f_images); // rows = swap_endian(rows); // cols = swap_endian(cols); // // for (int i = 0;i < test_size;i++) // { // fread(&test_label[i], 1, 1, f_labels); // if (i % 1000 == 0) // printf("Testing labels : Already read %5d labels\r", i); // } // printf("Testing labels : Already read %5d labels\n", test_size); // // for (int i = 0;i < test_size;i++) // { // for (int j = 0;j < rows;j++) // for (int k = 0;k < cols;k++) // { // tmp = 0; // fread(&tmp, 1, 1, f_images); // test_image[i][j][k] = tmp; // test_image[i][j][k] /= 255; // } // if (i % 1000 == 0) // printf("Testing images : Already read %5d images\r", i); // } // printf("Testing images : Already read %5d images\n\n", test_size); // // fclose(f_images); // fclose(f_labels); //} // //float sigmoid(float x) //{ // return (1 / (1 + exp(-1 * x))); //} // //void set_input(int idx, float image[TRAIN_NUM][ROW][COL]) //{ // for (int i = 0;i < ROW;i++) // for (int j = 0;j < COL;j++) // input[i][j] = image[idx][i][j]; //} // //void input_conv() //{ // for (int i = 0;i < CONV_W_NUM;i++) // for (int j = 0;j < CONV_SIZE;j++) // for (int k = 0;k < CONV_SIZE;k++) // { // conv_z[i][j][k] = 0; // for (int l = 0;l < CONV_W_SIZE;l++) // for (int m = 0;m < CONV_W_SIZE;m++) // conv_z[i][j][k] += input[j + l][k + m] * conv_w[i][l][m]; // conv_z[i][j][k] += conv_b[i]; // conv_a[i][j][k] = sigmoid(conv_z[i][j][k]); // } //} // //void conv_pool() //{ // for (int i = 0;i < CONV_W_NUM;i++) // for (int j = 0;j < POOL_SIZE;j++) // for (int k = 0;k < POOL_SIZE;k++) // { // float _max = conv_a[i][j * 2][k * 
2]; // pool_pos[i][j][k] = 0; // if (conv_a[i][j * 2][k * 2 + 1] > _max) // { // _max = conv_a[i][j * 2][k * 2 + 1]; // pool_pos[i][j][k] = 1; // } // if (conv_a[i][j * 2 + 1][k * 2] > _max) // { // _max = conv_a[i][j * 2 + 1][k * 2]; // pool_pos[i][j][k] = 2; // } // if (conv_a[i][j * 2 + 1][k * 2 + 1] > _max) // { // _max = conv_a[i][j * 2 + 1][k * 2 + 1]; // pool_pos[i][j][k] = 3; // } // pool[i][j][k] = _max; // } //} // //void pool_fc1() //{ // for (int i = 0;i < FC1_SIZE;i++) // { // fc1_z[i] = 0; // for (int j = 0;j < CONV_W_NUM;j++) // for (int k = 0;k < POOL_SIZE;k++) // for (int l = 0;l < POOL_SIZE;l++) // fc1_z[i] += pool[j][k][l] * fc1_w[i][j][k][l]; // fc1_z[i] += fc1_b[i]; // fc1_a[i] = sigmoid(fc1_z[i]); // } //} // //void fc1_fc2() //{ // for (int i = 0;i < FC2_SIZE;i++) // { // fc2_z[i] = 0; // for (int j = 0;j < FC1_SIZE;j++) // fc2_z[i] += fc1_a[j] * fc2_w[i][j]; // fc2_z[i] += fc2_b[i]; // fc2_a[i] = sigmoid(fc2_z[i]); // } //} // //void set_answer(int idx, int label[TRAIN_NUM]) //{ // for (int i = 0;i < FC2_SIZE;i++) // { // output[i] = fc2_a[i]; // answer[i] = (label[idx] == i) ? 
1 : 0; // } //} // //void check_answer(int& correct_cnt) //{ // float _max = output[0]; // int max_pos = 0; // for (int i = 0;i < FC2_SIZE;i++) // { // if (_max < output[i]) // { // _max = output[i]; // max_pos = i; // } // } // if (answer[max_pos]) // correct_cnt++; //} // //void get_error(float& avg_error) //{ // for (int i = 0;i < FC2_SIZE;i++) // { // C[i] = output[i] - answer[i]; // avg_error += C[i] * C[i] * 0.5; // } //} // // //void update_fc2_b() //{ // for (int i = 0;i < FC2_SIZE;i++) // { // fc2_delta[i] = alpha * C[i] * (fc2_a[i] * (1.0 - fc2_a[i])); // fc2_db[i] += fc2_delta[i]; // } //} // //void update_fc2_w() //{ // for (int i = 0;i < FC2_SIZE;i++) // for (int j = 0;j < FC1_SIZE;j++) // fc2_dw[i][j] += fc2_delta[i] * fc1_a[j]; //} // //void update_fc1_b() //{ // for (int i = 0;i < FC1_SIZE;i++) // { // float error = 0; // for (int j = 0;j < FC2_SIZE;j++) // error += fc2_delta[j] * fc2_w[j][i]; // fc1_delta[i] = error * (fc1_a[i] * (1.0 - fc1_a[i])); // fc1_db[i] += fc1_delta[i]; // } //} // //void update_fc1_w() //{ // for (int i = 0;i < FC1_SIZE;i++) // for (int j = 0;j < CONV_W_NUM;j++) // for (int k = 0;k < POOL_SIZE;k++) // for (int l = 0;l < POOL_SIZE;l++) // fc1_dw[i][j][k][l] += fc1_delta[i] * pool[j][k][l]; //} // //void update_conv_b() //{ // for (int i = 0;i < CONV_W_NUM;i++) // { // conv_sigma_delta[i] = 0; // for (int j = 0;j < POOL_SIZE;j++) // for (int k = 0;k < POOL_SIZE;k++) // { // float error = 0; // conv_delta[i][j][k] = 0; // for (int l = 0;l < FC1_SIZE;l++) // error += fc1_delta[l] * fc1_w[l][i][j][k]; // conv_delta[i][j][k] = error * (pool[i][j][k] * (1.0 - pool[i][j][k])); // conv_sigma_delta[i] += error * (pool[i][j][k] * (1.0 - pool[i][j][k])); // } // conv_db[i] += conv_sigma_delta[i]; // } //} // //void update_conv_w() //{ // for (int i = 0;i < CONV_W_NUM;i++) // for (int j = 0;j < CONV_W_SIZE;j++) // for (int k = 0;k < CONV_W_SIZE;k++) // { // float error = 0; // for (int m = 0;m < POOL_SIZE;m++) // for (int n = 0;n < 
POOL_SIZE;n++) // { // int x = pool_pos[i][m][n] / 2; // int y = pool_pos[i][m][n] % 2; // error += conv_delta[i][m][n] * input[2 * m + j + x][2 * n + k + y]; // } // conv_dw[i][j][k] += error; // } //} // //void assign_grads() //{ // for (int i = 0;i < FC2_SIZE;i++) // { // fc2_b[i] -= (fc2_db[i] / minibatch); // fc2_db[i] = 0; // } // // for (int i = 0;i < FC2_SIZE;i++) // for (int j = 0;j < FC1_SIZE;j++) // { // fc2_w[i][j] -= (fc2_dw[i][j] / minibatch); // fc2_dw[i][j] = 0; // } // // for (int i = 0;i < FC1_SIZE;i++) // { // fc1_b[i] -= (fc1_db[i] / minibatch); // fc1_db[i] = 0; // } // // for (int i = 0;i < FC1_SIZE;i++) // for (int j = 0;j < CONV_W_NUM;j++) // for (int k = 0;k < POOL_SIZE;k++) // for (int l = 0;l < POOL_SIZE;l++) // { // fc1_w[i][j][k][l] -= (fc1_dw[i][j][k][l] / minibatch); // fc1_dw[i][j][k][l] = 0; // } // // for (int i = 0;i < CONV_W_NUM;i++) // { // conv_b[i] -= (conv_db[i] / minibatch); // conv_db[i] = 0; // } // // for (int i = 0;i < CONV_W_NUM;i++) // for (int l = 0;l < CONV_W_SIZE;l++) // for (int m = 0;m < CONV_W_SIZE;m++) // { // conv_w[i][l][m] -= (conv_dw[i][l][m] / minibatch); // conv_dw[i][l][m] = 0; // } //} // //float get_rand(float fan_in) //{ // float sum = 0; // for (int i = 0;i < 12;i++) // sum += (float)rand() / RAND_MAX; // sum -= 6; // sum *= 1 / sqrt(fan_in); // return sum; //} //void init_params() //{ // for (int i = 0;i < CONV_W_NUM;i++) // { // for (int j = 0;j < CONV_W_SIZE;j++) // for (int k = 0;k < CONV_W_SIZE;k++) // conv_w[i][j][k] = get_rand(CONV_W_SIZE * CONV_W_SIZE); // conv_b[i] = get_rand(CONV_W_SIZE * CONV_W_SIZE); // } // // for (int i = 0;i < FC1_SIZE;i++) // { // for (int j = 0;j < CONV_W_NUM;j++) // for (int k = 0;k < POOL_SIZE;k++) // for (int l = 0;l < POOL_SIZE;l++) // fc1_w[i][j][k][l] = get_rand(POOL_SIZE * POOL_SIZE * CONV_W_NUM); // fc1_b[i] = get_rand(POOL_SIZE * POOL_SIZE * CONV_W_NUM); // } // // for (int i = 0;i < FC2_SIZE;i++) // { // for (int j = 0;j < FC1_SIZE;j++) // fc2_w[i][j] = 
get_rand(FC1_SIZE); // fc2_b[i] = get_rand(FC1_SIZE); // } //} //int main() { // // load_data(); // clock_t t = clock(); // init_params(); // // for (int i = 1;i <= epochs;i++) // { // correct_cnt = 0; // avg_error = 0; // // for (int j = 0;j < TRAIN_NUM;j++) // { // set_input(j, train_image); // input_conv(); // conv_pool(); // pool_fc1(); // fc1_fc2(); // set_answer(j, train_label); // check_answer(correct_cnt); // get_error(avg_error); // // update_fc2_b(); // update_fc2_w(); // update_fc1_b(); // update_fc1_w(); // update_conv_b(); // update_conv_w(); // if ((j + 1) % minibatch == 0) // assign_grads(); // // if (j && j % 100 == 0) // printf("Training Time spent : %.0fs Image count : %d Accuracy : %0.4f%% Error : %0.4f%% Epoch : %d \r", floor(((float)(clock() - t)) / CLOCKS_PER_SEC), j, ((float)correct_cnt / j) * 100, (avg_error / j) * 100, i); // } // printf("Training Time spent : %.0fs Image count : %d Accuracy : %0.4f%% Error : %0.4f%% Epoch : %d \n", floor(((float)(clock() - t)) / CLOCKS_PER_SEC), TRAIN_NUM, ((float)correct_cnt / TRAIN_NUM) * 100, (avg_error / TRAIN_NUM) * 100, i); // // correct_cnt = 0; // avg_error = 0; // // for (int j = 0;j < TEST_NUM;j++) // { // set_input(j, test_image); // input_conv(); // conv_pool(); // pool_fc1(); // fc1_fc2(); // set_answer(j, test_label); // check_answer(correct_cnt); // get_error(avg_error); // // if (j && j % 100 == 0) // printf("Testing Time spent : %.0fs Image count : %d Accuracy : %0.4f%% Error : %0.4f%% \r", floor(((float)(clock() - t)) / CLOCKS_PER_SEC), j, ((float)correct_cnt / j) * 100, (avg_error / j) * 100); // } // printf("Testing Time spent : %.0fs Image count : %d Accuracy : %0.4f%% Error : %0.4f%% \n", floor(((float)(clock() - t)) / CLOCKS_PER_SEC), TEST_NUM, ((float)correct_cnt / TEST_NUM) * 100, (avg_error / TEST_NUM) * 100); // // if ((float)correct_cnt / TEST_NUM * 100 > max_acc) // { // max_acc = (float)correct_cnt / TEST_NUM * 100; // //export_params(); // printf("The new model has been 
exported.Accuracy has reached to %0.5f%%\n\n", max_acc); // } // else // { // alpha = alpha - (alpha / 3); // printf("Learning rate has been reduced to %f\n\n", alpha); // } // } // // // // //float train_image[ROW][COL] = { // //{ 3, 1, 2, 4, 3, 3 }, // //{ 2, 4, 3, 1, 1, 4 }, // //{ 1, 5, 2, 3, 2, 5 }, // //{ 2, 3, 4, 1, 4, 1 }, // //{ 1, 4, 2, 1, 2, 3 }, // //{ 2, 3, 6, 5, 4, 1 }, }; // //float conv_w[CONV_W_NUM][CONV_W_SIZE][CONV_W_SIZE] = { { // //{1, 2, 3}, // //{4, 3, 1}, // //{1, 2, 4}}, // //{{4, 2, 5}, // //{2, 3, 1}, // //{1, 2, 3}} }; // // ////float conv_z[CONV_W_NUM][CONV_SIZE][CONV_SIZE]; // //float conv_z[2][2][2]; // //float train_label[2] = { 3,2 }; // // //cudaMemcpyToSymbol(_train_image, train_image, ROW * COL * sizeof(float)); // //cudaMemcpyToSymbol(_conv_w, conv_w, CONV_W_NUM * CONV_W_SIZE * CONV_W_SIZE * sizeof(float)); // ////cudaMemcpy(_train_label, train_label, 2 * sizeof(float), cudaMemcpyHostToDevice); // ////cudaMemcpy(_train_image, train_image, ROW * COL * sizeof(float), cudaMemcpyHostToDevice); // ////cudaMemcpy(_conv_w, conv_w, CONV_W_NUM*CONV_W_SIZE*CONV_W_SIZE*sizeof(float), cudaMemcpyHostToDevice); // //dim3 grid2(2, 4, 4); // // ////_input_conv << <1, grid2>> > ((float (*)[4])_train_image, (float (*)[3][3])_conv_w, (float (*)[2][2])_conv_z); // //_input_conv << <1, grid2 >> > (); // //_conv_pool << <1, grid2 >> > (); // ////cudaMemcpyFromSymbol(&conv_z, _pool, CONV_W_NUM * CONV_SIZE * CONV_SIZE * sizeof(float)); // //cudaMemcpyFromSymbol(&conv_z, _pool, 8 * sizeof(float)); // //for (int i = 0;i < 2;i++) { // // for (int j = 0;j <2;j++) { // // cout << conv_z[0][i][j] << " "; // // } // // cout << endl; // //} // //for (int i = 0;i < 2;i++) { // // for (int j = 0;j < 2;j++) { // // cout << conv_z[1][i][j] << " "; // // } // // cout << endl; // //} // return 0; //}
code for sm_80