system stringclasses 7 values | input stringlengths 10 94.5k | output stringlengths 14 55.5k |
|---|---|---|
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include "stdio.h"
#include<iostream>
#include <hip/hip_runtime.h>
#include <math.h>
#define TILE_SIZE 2
__global__ void gpu_Matrix_Mul_nonshared(float *d_a, float *d_b, float *d_c, const int size)
{
int row, col;
col = TILE_SIZE * blockIdx.x + threadIdx.x;
row = TILE_SIZE * blockIdx.y + threadIdx.y;
for(int k = 0; k < size; k++){
d_c[row * size + col] += d_a[row * size + k] * d_b[k * size + col];
}
}
__global__ void gpu_Matrix_Mul_shared(float *d_a, float *d_b, float *d_c, const int size)
{
int row, col;
col = TILE_SIZE * blockIdx.x + threadIdx.x;
row = TILE_SIZE * blockIdx.y + threadIdx.y;
__shared__ float share_a[TILE_SIZE][TILE_SIZE];
__shared__ float share_b[TILE_SIZE][TILE_SIZE];
for(int i = 0; i < size/TILE_SIZE; i++){
share_a[threadIdx.y][threadIdx.x] = d_a[row * size + (i*TILE_SIZE + threadIdx.x)];
share_b[threadIdx.y][threadIdx.x] = d_b[(i*TILE_SIZE + threadIdx.x) * size + col];
__syncthreads();
for(int j = 0; j < TILE_SIZE; j++){
d_c[row * size + col] += share_a[threadIdx.y][j] * share_b[j][threadIdx.x];
}
__syncthreads();
}
}
int main(void){
const int size = 4;
float h_a[size][size];
float h_b[size][size];
float h_result[size][size];
float *d_a, *d_b, *d_result;
// Init array
for(int i=0; i < size; i++){
for(int j=0; j<size;j++){
h_a[i][j] = i;
h_b[i][j] = j;
}
}
hipMalloc((void **)&d_a, size * size * sizeof(int));
hipMalloc((void **)&d_b, size * size * sizeof(int));
hipMalloc((void **)&d_result, size * size * sizeof(int));
hipMemcpy(d_a, h_a, size * size * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, size * size * sizeof(int), hipMemcpyHostToDevice);
dim3 dimGrid(size/TILE_SIZE, size/TILE_SIZE, 1);
dim3 dimBlock(TILE_SIZE, TILE_SIZE, 1);
// gpu_Matrix_Mul_nonshared << <dimGrid, dimBlock >> > (d_a, d_b, d_result, size);
gpu_Matrix_Mul_shared<<<dimGrid, dimBlock>>> (d_a, d_b, d_result,size);
hipMemcpy(h_result, d_result, size*size*sizeof(int), hipMemcpyDeviceToHost);
printf("The result of Matrix multiplication is: \n");
for (int i = 0; i< size; i++)
{
for (int j = 0; j < size; j++)
{
printf("%f ", h_result[i][j]);
}
printf("\n");
}
hipFree(d_a);
hipFree(d_b);
hipFree(d_result);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z24gpu_Matrix_Mul_nonsharedPfS_S_i
.globl _Z24gpu_Matrix_Mul_nonsharedPfS_S_i
.p2align 8
.type _Z24gpu_Matrix_Mul_nonsharedPfS_S_i,@function
_Z24gpu_Matrix_Mul_nonsharedPfS_S_i:
s_load_b32 s4, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s4, 1
s_cbranch_scc1 .LBB0_3
v_bfe_u32 v1, v0, 10, 10
v_and_b32_e32 v0, 0x3ff, v0
s_load_b64 s[2:3], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshl_add_u32 v1, s15, 1, v1
v_lshl_add_u32 v0, s14, 1, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v1, s4
v_add_nc_u32_e32 v1, v4, v0
v_ashrrev_i32_e32 v5, 31, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v2, 31, v1
v_lshlrev_b64 v[4:5], 2, v[4:5]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s2, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo
s_load_b128 s[0:3], s[0:1], 0x0
global_load_b32 v6, v[2:3], off
s_waitcnt lgkmcnt(0)
v_add_co_u32 v4, vcc_lo, s0, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v5, vcc_lo
s_mov_b32 s0, s4
.p2align 6
.LBB0_2:
v_ashrrev_i32_e32 v1, 31, v0
s_add_i32 s0, s0, -1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_cmp_eq_u32 s0, 0
v_lshlrev_b64 v[7:8], 2, v[0:1]
v_add_nc_u32_e32 v0, s4, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v7, vcc_lo, s2, v7
v_add_co_ci_u32_e32 v8, vcc_lo, s3, v8, vcc_lo
global_load_b32 v1, v[4:5], off
global_load_b32 v7, v[7:8], off
v_add_co_u32 v4, vcc_lo, v4, 4
v_add_co_ci_u32_e32 v5, vcc_lo, 0, v5, vcc_lo
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v6, v1, v7
global_store_b32 v[2:3], v6, off
s_cbranch_scc0 .LBB0_2
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z24gpu_Matrix_Mul_nonsharedPfS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 28
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z24gpu_Matrix_Mul_nonsharedPfS_S_i, .Lfunc_end0-_Z24gpu_Matrix_Mul_nonsharedPfS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z21gpu_Matrix_Mul_sharedPfS_S_i
.globl _Z21gpu_Matrix_Mul_sharedPfS_S_i
.p2align 8
.type _Z21gpu_Matrix_Mul_sharedPfS_S_i,@function
_Z21gpu_Matrix_Mul_sharedPfS_S_i:
s_load_b32 s4, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s4, 2
s_cbranch_scc1 .LBB1_5
v_bfe_u32 v3, v0, 10, 10
v_and_b32_e32 v5, 0x3ff, v0
s_clause 0x1
s_load_b64 s[6:7], s[0:1], 0x10
s_load_b128 s[0:3], s[0:1], 0x0
s_lshr_b32 s5, s4, 31
v_lshl_add_u32 v1, s15, 1, v3
v_lshlrev_b32_e32 v4, 2, v5
v_lshlrev_b32_e32 v7, 3, v3
s_add_i32 s5, s4, s5
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_3)
s_ashr_i32 s5, s5, 1
v_mul_lo_u32 v6, v1, s4
v_add_nc_u32_e32 v8, 16, v4
v_dual_mov_b32 v4, 0 :: v_dual_add_nc_u32 v9, v7, v4
v_lshl_add_u32 v0, s14, 1, v5
v_add_nc_u32_e32 v10, v8, v7
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v1, v6, v0
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v1, vcc_lo, s6, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, s7, v2, vcc_lo
s_mov_b32 s6, 0
s_set_inst_prefetch_distance 0x1
.p2align 6
.LBB1_2:
v_lshl_add_u32 v13, s6, 1, v5
s_mov_b32 s7, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_add_nc_u32_e32 v3, v13, v6
v_mad_u64_u32 v[11:12], null, v13, s4, v[0:1]
v_mov_b32_e32 v12, v4
v_lshlrev_b64 v[13:14], 2, v[3:4]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[11:12], 2, v[11:12]
v_add_co_u32 v13, vcc_lo, s0, v13
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_ci_u32_e32 v14, vcc_lo, s1, v14, vcc_lo
v_add_co_u32 v11, vcc_lo, s2, v11
s_delay_alu instid0(VALU_DEP_4)
v_add_co_ci_u32_e32 v12, vcc_lo, s3, v12, vcc_lo
global_load_b32 v3, v[13:14], off
global_load_b32 v11, v[11:12], off
s_waitcnt vmcnt(1)
ds_store_b32 v9, v3
s_waitcnt vmcnt(0)
ds_store_b32 v10, v11
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
global_load_b32 v3, v[1:2], off
v_mov_b32_e32 v11, v8
.LBB1_3:
v_add_nc_u32_e32 v12, s7, v7
s_add_i32 s7, s7, 4
ds_load_b32 v13, v11
ds_load_b32 v12, v12
v_add_nc_u32_e32 v11, 8, v11
s_cmp_lg_u32 s7, 4
s_waitcnt vmcnt(0) lgkmcnt(0)
v_fmac_f32_e32 v3, v12, v13
s_cbranch_scc0 .LBB1_3
s_add_i32 s6, s6, 1
global_store_b32 v[1:2], v3, off
s_cmp_eq_u32 s6, s5
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
s_cbranch_scc0 .LBB1_2
.LBB1_5:
s_set_inst_prefetch_distance 0x2
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z21gpu_Matrix_Mul_sharedPfS_S_i
.amdhsa_group_segment_fixed_size 32
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 28
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 15
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z21gpu_Matrix_Mul_sharedPfS_S_i, .Lfunc_end1-_Z21gpu_Matrix_Mul_sharedPfS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 28
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z24gpu_Matrix_Mul_nonsharedPfS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z24gpu_Matrix_Mul_nonsharedPfS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 32
.kernarg_segment_align: 8
.kernarg_segment_size: 28
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z21gpu_Matrix_Mul_sharedPfS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z21gpu_Matrix_Mul_sharedPfS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 15
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include "stdio.h"
#include<iostream>
#include <hip/hip_runtime.h>
#include <math.h>
#define TILE_SIZE 2
__global__ void gpu_Matrix_Mul_nonshared(float *d_a, float *d_b, float *d_c, const int size)
{
int row, col;
col = TILE_SIZE * blockIdx.x + threadIdx.x;
row = TILE_SIZE * blockIdx.y + threadIdx.y;
for(int k = 0; k < size; k++){
d_c[row * size + col] += d_a[row * size + k] * d_b[k * size + col];
}
}
__global__ void gpu_Matrix_Mul_shared(float *d_a, float *d_b, float *d_c, const int size)
{
int row, col;
col = TILE_SIZE * blockIdx.x + threadIdx.x;
row = TILE_SIZE * blockIdx.y + threadIdx.y;
__shared__ float share_a[TILE_SIZE][TILE_SIZE];
__shared__ float share_b[TILE_SIZE][TILE_SIZE];
for(int i = 0; i < size/TILE_SIZE; i++){
share_a[threadIdx.y][threadIdx.x] = d_a[row * size + (i*TILE_SIZE + threadIdx.x)];
share_b[threadIdx.y][threadIdx.x] = d_b[(i*TILE_SIZE + threadIdx.x) * size + col];
__syncthreads();
for(int j = 0; j < TILE_SIZE; j++){
d_c[row * size + col] += share_a[threadIdx.y][j] * share_b[j][threadIdx.x];
}
__syncthreads();
}
}
int main(void){
const int size = 4;
float h_a[size][size];
float h_b[size][size];
float h_result[size][size];
float *d_a, *d_b, *d_result;
// Init array
for(int i=0; i < size; i++){
for(int j=0; j<size;j++){
h_a[i][j] = i;
h_b[i][j] = j;
}
}
hipMalloc((void **)&d_a, size * size * sizeof(int));
hipMalloc((void **)&d_b, size * size * sizeof(int));
hipMalloc((void **)&d_result, size * size * sizeof(int));
hipMemcpy(d_a, h_a, size * size * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, size * size * sizeof(int), hipMemcpyHostToDevice);
dim3 dimGrid(size/TILE_SIZE, size/TILE_SIZE, 1);
dim3 dimBlock(TILE_SIZE, TILE_SIZE, 1);
// gpu_Matrix_Mul_nonshared << <dimGrid, dimBlock >> > (d_a, d_b, d_result, size);
gpu_Matrix_Mul_shared<<<dimGrid, dimBlock>>> (d_a, d_b, d_result,size);
hipMemcpy(h_result, d_result, size*size*sizeof(int), hipMemcpyDeviceToHost);
printf("The result of Matrix multiplication is: \n");
for (int i = 0; i< size; i++)
{
for (int j = 0; j < size; j++)
{
printf("%f ", h_result[i][j]);
}
printf("\n");
}
hipFree(d_a);
hipFree(d_b);
hipFree(d_result);
return 0;
} | .text
.file "03_matrix_multiplication.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z39__device_stub__gpu_Matrix_Mul_nonsharedPfS_S_i # -- Begin function _Z39__device_stub__gpu_Matrix_Mul_nonsharedPfS_S_i
.p2align 4, 0x90
.type _Z39__device_stub__gpu_Matrix_Mul_nonsharedPfS_S_i,@function
_Z39__device_stub__gpu_Matrix_Mul_nonsharedPfS_S_i: # @_Z39__device_stub__gpu_Matrix_Mul_nonsharedPfS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z24gpu_Matrix_Mul_nonsharedPfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z39__device_stub__gpu_Matrix_Mul_nonsharedPfS_S_i, .Lfunc_end0-_Z39__device_stub__gpu_Matrix_Mul_nonsharedPfS_S_i
.cfi_endproc
# -- End function
.globl _Z36__device_stub__gpu_Matrix_Mul_sharedPfS_S_i # -- Begin function _Z36__device_stub__gpu_Matrix_Mul_sharedPfS_S_i
.p2align 4, 0x90
.type _Z36__device_stub__gpu_Matrix_Mul_sharedPfS_S_i,@function
_Z36__device_stub__gpu_Matrix_Mul_sharedPfS_S_i: # @_Z36__device_stub__gpu_Matrix_Mul_sharedPfS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z21gpu_Matrix_Mul_sharedPfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size _Z36__device_stub__gpu_Matrix_Mul_sharedPfS_S_i, .Lfunc_end1-_Z36__device_stub__gpu_Matrix_Mul_sharedPfS_S_i
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $304, %rsp # imm = 0x130
.cfi_def_cfa_offset 336
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
leaq 240(%rsp), %rax
leaq 176(%rsp), %rcx
xorl %edx, %edx
.p2align 4, 0x90
.LBB2_1: # %.preheader28
# =>This Loop Header: Depth=1
# Child Loop BB2_2 Depth 2
xorps %xmm0, %xmm0
cvtsi2ss %edx, %xmm0
xorl %esi, %esi
.p2align 4, 0x90
.LBB2_2: # Parent Loop BB2_1 Depth=1
# => This Inner Loop Header: Depth=2
xorps %xmm1, %xmm1
cvtsi2ss %esi, %xmm1
movss %xmm0, (%rax,%rsi,4)
movss %xmm1, (%rcx,%rsi,4)
incq %rsi
cmpq $4, %rsi
jne .LBB2_2
# %bb.3: # in Loop: Header=BB2_1 Depth=1
incq %rdx
addq $16, %rax
addq $16, %rcx
cmpq $4, %rdx
jne .LBB2_1
# %bb.4:
leaq 24(%rsp), %rdi
movl $64, %esi
callq hipMalloc
leaq 16(%rsp), %rdi
movl $64, %esi
callq hipMalloc
leaq 8(%rsp), %rdi
movl $64, %esi
callq hipMalloc
movq 24(%rsp), %rdi
leaq 240(%rsp), %rsi
movl $64, %edx
movl $1, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
leaq 176(%rsp), %rsi
movl $64, %edx
movl $1, %ecx
callq hipMemcpy
movabsq $8589934594, %rdi # imm = 0x200000002
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_6
# %bb.5:
movq 24(%rsp), %rax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl $4, 36(%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 36(%rsp), %rax
movq %rax, 136(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z21gpu_Matrix_Mul_sharedPfS_S_i, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_6:
movq 8(%rsp), %rsi
leaq 112(%rsp), %rbx
movl $64, %edx
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
movl $.Lstr, %edi
callq puts@PLT
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB2_7: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB2_8 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB2_8: # Parent Loop BB2_7 Depth=1
# => This Inner Loop Header: Depth=2
movss (%rbx,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.1, %edi
movb $1, %al
callq printf
incq %r15
cmpq $4, %r15
jne .LBB2_8
# %bb.9: # in Loop: Header=BB2_7 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r14
addq $16, %rbx
cmpq $4, %r14
jne .LBB2_7
# %bb.10:
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $304, %rsp # imm = 0x130
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z24gpu_Matrix_Mul_nonsharedPfS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z21gpu_Matrix_Mul_sharedPfS_S_i, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z24gpu_Matrix_Mul_nonsharedPfS_S_i,@object # @_Z24gpu_Matrix_Mul_nonsharedPfS_S_i
.section .rodata,"a",@progbits
.globl _Z24gpu_Matrix_Mul_nonsharedPfS_S_i
.p2align 3, 0x0
_Z24gpu_Matrix_Mul_nonsharedPfS_S_i:
.quad _Z39__device_stub__gpu_Matrix_Mul_nonsharedPfS_S_i
.size _Z24gpu_Matrix_Mul_nonsharedPfS_S_i, 8
.type _Z21gpu_Matrix_Mul_sharedPfS_S_i,@object # @_Z21gpu_Matrix_Mul_sharedPfS_S_i
.globl _Z21gpu_Matrix_Mul_sharedPfS_S_i
.p2align 3, 0x0
_Z21gpu_Matrix_Mul_sharedPfS_S_i:
.quad _Z36__device_stub__gpu_Matrix_Mul_sharedPfS_S_i
.size _Z21gpu_Matrix_Mul_sharedPfS_S_i, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "%f "
.size .L.str.1, 6
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z24gpu_Matrix_Mul_nonsharedPfS_S_i"
.size .L__unnamed_1, 36
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z21gpu_Matrix_Mul_sharedPfS_S_i"
.size .L__unnamed_2, 33
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "The result of Matrix multiplication is: "
.size .Lstr, 41
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z39__device_stub__gpu_Matrix_Mul_nonsharedPfS_S_i
.addrsig_sym _Z36__device_stub__gpu_Matrix_Mul_sharedPfS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z24gpu_Matrix_Mul_nonsharedPfS_S_i
.addrsig_sym _Z21gpu_Matrix_Mul_sharedPfS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000e2025_00000000-6_03_matrix_multiplication.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3672:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3672:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z49__device_stub__Z24gpu_Matrix_Mul_nonsharedPfS_S_iPfS_S_i
.type _Z49__device_stub__Z24gpu_Matrix_Mul_nonsharedPfS_S_iPfS_S_i, @function
_Z49__device_stub__Z24gpu_Matrix_Mul_nonsharedPfS_S_iPfS_S_i:
.LFB3694:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z24gpu_Matrix_Mul_nonsharedPfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3694:
.size _Z49__device_stub__Z24gpu_Matrix_Mul_nonsharedPfS_S_iPfS_S_i, .-_Z49__device_stub__Z24gpu_Matrix_Mul_nonsharedPfS_S_iPfS_S_i
.globl _Z24gpu_Matrix_Mul_nonsharedPfS_S_i
.type _Z24gpu_Matrix_Mul_nonsharedPfS_S_i, @function
_Z24gpu_Matrix_Mul_nonsharedPfS_S_i:
.LFB3695:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z49__device_stub__Z24gpu_Matrix_Mul_nonsharedPfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3695:
.size _Z24gpu_Matrix_Mul_nonsharedPfS_S_i, .-_Z24gpu_Matrix_Mul_nonsharedPfS_S_i
.globl _Z46__device_stub__Z21gpu_Matrix_Mul_sharedPfS_S_iPfS_S_i
.type _Z46__device_stub__Z21gpu_Matrix_Mul_sharedPfS_S_iPfS_S_i, @function
_Z46__device_stub__Z21gpu_Matrix_Mul_sharedPfS_S_iPfS_S_i:
.LFB3696:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z21gpu_Matrix_Mul_sharedPfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3696:
.size _Z46__device_stub__Z21gpu_Matrix_Mul_sharedPfS_S_iPfS_S_i, .-_Z46__device_stub__Z21gpu_Matrix_Mul_sharedPfS_S_iPfS_S_i
.globl _Z21gpu_Matrix_Mul_sharedPfS_S_i
.type _Z21gpu_Matrix_Mul_sharedPfS_S_i, @function
_Z21gpu_Matrix_Mul_sharedPfS_S_i:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z46__device_stub__Z21gpu_Matrix_Mul_sharedPfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _Z21gpu_Matrix_Mul_sharedPfS_S_i, .-_Z21gpu_Matrix_Mul_sharedPfS_S_i
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC4:
.string "The result of Matrix multiplication is: \n"
.section .rodata.str1.1,"aMS",@progbits,1
.LC5:
.string "%f "
.LC6:
.string "\n"
.text
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $256, %rsp
.cfi_def_cfa_offset 304
movq %fs:40, %rax
movq %rax, 248(%rsp)
xorl %eax, %eax
leaq 48(%rsp), %rdx
leaq 112(%rsp), %rax
movl $0, %ecx
movss .LC1(%rip), %xmm3
movss .LC2(%rip), %xmm2
movss .LC3(%rip), %xmm1
.L20:
pxor %xmm0, %xmm0
cvtsi2ssl %ecx, %xmm0
movss %xmm0, (%rdx)
movl $0x00000000, (%rax)
movss %xmm0, 4(%rdx)
movss %xmm3, 4(%rax)
movss %xmm0, 8(%rdx)
movss %xmm2, 8(%rax)
movss %xmm0, 12(%rdx)
movss %xmm1, 12(%rax)
addl $1, %ecx
addq $16, %rdx
addq $16, %rax
cmpl $4, %ecx
jne .L20
movq %rsp, %rdi
movl $64, %esi
call cudaMalloc@PLT
leaq 8(%rsp), %rdi
movl $64, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $64, %esi
call cudaMalloc@PLT
leaq 48(%rsp), %rsi
movl $1, %ecx
movl $64, %edx
movq (%rsp), %rdi
call cudaMemcpy@PLT
leaq 112(%rsp), %rsi
movl $1, %ecx
movl $64, %edx
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $2, 24(%rsp)
movl $2, 28(%rsp)
movl $1, 32(%rsp)
movl $2, 36(%rsp)
movl $2, 40(%rsp)
movl $1, 44(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L29
.L21:
leaq 176(%rsp), %rdi
movl $2, %ecx
movl $64, %edx
movq 16(%rsp), %rsi
call cudaMemcpy@PLT
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 192(%rsp), %rbp
leaq 256(%rsp), %r14
leaq .LC5(%rip), %r12
leaq .LC6(%rip), %r13
.L22:
leaq -16(%rbp), %rbx
.L23:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %r12, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %rbp, %rbx
jne .L23
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $16, %rbp
cmpq %r14, %rbp
jne .L22
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 248(%rsp), %rax
subq %fs:40, %rax
jne .L30
movl $0, %eax
addq $256, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L29:
.cfi_restore_state
movl $4, %ecx
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movq (%rsp), %rdi
call _Z46__device_stub__Z21gpu_Matrix_Mul_sharedPfS_S_iPfS_S_i
jmp .L21
.L30:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size main, .-main
.section .rodata.str1.8
.align 8
.LC7:
.string "_Z21gpu_Matrix_Mul_sharedPfS_S_i"
.align 8
.LC8:
.string "_Z24gpu_Matrix_Mul_nonsharedPfS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3699:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq _Z21gpu_Matrix_Mul_sharedPfS_S_i(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _Z24gpu_Matrix_Mul_nonsharedPfS_S_i(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3699:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC1:
.long 1065353216
.align 4
.LC2:
.long 1073741824
.align 4
.LC3:
.long 1077936128
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "03_matrix_multiplication.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z39__device_stub__gpu_Matrix_Mul_nonsharedPfS_S_i # -- Begin function _Z39__device_stub__gpu_Matrix_Mul_nonsharedPfS_S_i
.p2align 4, 0x90
.type _Z39__device_stub__gpu_Matrix_Mul_nonsharedPfS_S_i,@function
_Z39__device_stub__gpu_Matrix_Mul_nonsharedPfS_S_i: # @_Z39__device_stub__gpu_Matrix_Mul_nonsharedPfS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z24gpu_Matrix_Mul_nonsharedPfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z39__device_stub__gpu_Matrix_Mul_nonsharedPfS_S_i, .Lfunc_end0-_Z39__device_stub__gpu_Matrix_Mul_nonsharedPfS_S_i
.cfi_endproc
# -- End function
.globl _Z36__device_stub__gpu_Matrix_Mul_sharedPfS_S_i # -- Begin function _Z36__device_stub__gpu_Matrix_Mul_sharedPfS_S_i
.p2align 4, 0x90
.type _Z36__device_stub__gpu_Matrix_Mul_sharedPfS_S_i,@function
_Z36__device_stub__gpu_Matrix_Mul_sharedPfS_S_i: # @_Z36__device_stub__gpu_Matrix_Mul_sharedPfS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z21gpu_Matrix_Mul_sharedPfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size _Z36__device_stub__gpu_Matrix_Mul_sharedPfS_S_i, .Lfunc_end1-_Z36__device_stub__gpu_Matrix_Mul_sharedPfS_S_i
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $304, %rsp # imm = 0x130
.cfi_def_cfa_offset 336
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
leaq 240(%rsp), %rax
leaq 176(%rsp), %rcx
xorl %edx, %edx
.p2align 4, 0x90
.LBB2_1: # %.preheader28
# =>This Loop Header: Depth=1
# Child Loop BB2_2 Depth 2
xorps %xmm0, %xmm0
cvtsi2ss %edx, %xmm0
xorl %esi, %esi
.p2align 4, 0x90
.LBB2_2: # Parent Loop BB2_1 Depth=1
# => This Inner Loop Header: Depth=2
xorps %xmm1, %xmm1
cvtsi2ss %esi, %xmm1
movss %xmm0, (%rax,%rsi,4)
movss %xmm1, (%rcx,%rsi,4)
incq %rsi
cmpq $4, %rsi
jne .LBB2_2
# %bb.3: # in Loop: Header=BB2_1 Depth=1
incq %rdx
addq $16, %rax
addq $16, %rcx
cmpq $4, %rdx
jne .LBB2_1
# %bb.4:
leaq 24(%rsp), %rdi
movl $64, %esi
callq hipMalloc
leaq 16(%rsp), %rdi
movl $64, %esi
callq hipMalloc
leaq 8(%rsp), %rdi
movl $64, %esi
callq hipMalloc
movq 24(%rsp), %rdi
leaq 240(%rsp), %rsi
movl $64, %edx
movl $1, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
leaq 176(%rsp), %rsi
movl $64, %edx
movl $1, %ecx
callq hipMemcpy
movabsq $8589934594, %rdi # imm = 0x200000002
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_6
# %bb.5:
movq 24(%rsp), %rax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl $4, 36(%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 36(%rsp), %rax
movq %rax, 136(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z21gpu_Matrix_Mul_sharedPfS_S_i, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_6:
movq 8(%rsp), %rsi
leaq 112(%rsp), %rbx
movl $64, %edx
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
movl $.Lstr, %edi
callq puts@PLT
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB2_7: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB2_8 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB2_8: # Parent Loop BB2_7 Depth=1
# => This Inner Loop Header: Depth=2
movss (%rbx,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.1, %edi
movb $1, %al
callq printf
incq %r15
cmpq $4, %r15
jne .LBB2_8
# %bb.9: # in Loop: Header=BB2_7 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r14
addq $16, %rbx
cmpq $4, %r14
jne .LBB2_7
# %bb.10:
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $304, %rsp # imm = 0x130
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z24gpu_Matrix_Mul_nonsharedPfS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z21gpu_Matrix_Mul_sharedPfS_S_i, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z24gpu_Matrix_Mul_nonsharedPfS_S_i,@object # @_Z24gpu_Matrix_Mul_nonsharedPfS_S_i
.section .rodata,"a",@progbits
.globl _Z24gpu_Matrix_Mul_nonsharedPfS_S_i
.p2align 3, 0x0
_Z24gpu_Matrix_Mul_nonsharedPfS_S_i:
.quad _Z39__device_stub__gpu_Matrix_Mul_nonsharedPfS_S_i
.size _Z24gpu_Matrix_Mul_nonsharedPfS_S_i, 8
.type _Z21gpu_Matrix_Mul_sharedPfS_S_i,@object # @_Z21gpu_Matrix_Mul_sharedPfS_S_i
.globl _Z21gpu_Matrix_Mul_sharedPfS_S_i
.p2align 3, 0x0
_Z21gpu_Matrix_Mul_sharedPfS_S_i:
.quad _Z36__device_stub__gpu_Matrix_Mul_sharedPfS_S_i
.size _Z21gpu_Matrix_Mul_sharedPfS_S_i, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "%f "
.size .L.str.1, 6
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z24gpu_Matrix_Mul_nonsharedPfS_S_i"
.size .L__unnamed_1, 36
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z21gpu_Matrix_Mul_sharedPfS_S_i"
.size .L__unnamed_2, 33
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "The result of Matrix multiplication is: "
.size .Lstr, 41
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z39__device_stub__gpu_Matrix_Mul_nonsharedPfS_S_i
.addrsig_sym _Z36__device_stub__gpu_Matrix_Mul_sharedPfS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z24gpu_Matrix_Mul_nonsharedPfS_S_i
.addrsig_sym _Z21gpu_Matrix_Mul_sharedPfS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#define N 40
__global__ void MatAdd(float *A, float *B, float *C) {
int i = threadIdx.x;
C[i] = A[i] + B[i];
}
size_t ind(int x, int y) {
return y * N + x;
}
int main() {
float A[N * N];
float B[N * N];
float C[N * N];
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
A[ind(j, i)] = 3.4;
B[ind(j, i)] = 5.4;
}
}
float *a;
float *b;
float *c;
cudaMalloc((void **) &a, N * N * sizeof(float));
cudaMalloc((void **) &b, N * N * sizeof(float));
cudaMalloc((void **) &c, N * N * sizeof(float));
cudaMemcpy(a, A, sizeof(*a) * N * N, cudaMemcpyHostToDevice);
cudaMemcpy(b, B, sizeof(*a) * N * N, cudaMemcpyHostToDevice);
cudaMemcpy(c, C, sizeof(*a) * N * N, cudaMemcpyHostToDevice);
int numBlocks = 1;
dim3 threadsPerBlock(N * N);
MatAdd<<<numBlocks, threadsPerBlock>>>(a, b, c);
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
printf("%f ", c[ind(j, i)]);
}
printf("\n");
}
cudaFree(a);
cudaFree(b);
cudaFree(c);
} | code for sm_80
Function : _Z6MatAddPfS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x001fc800078e0207 */
/*0050*/ IMAD.WIDE R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0207 */
/*0060*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0070*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc800078e0207 */
/*0090*/ FADD R9, R2, R5 ; /* 0x0000000502097221 */
/* 0x004fca0000000000 */
/*00a0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#define N 40
__global__ void MatAdd(float *A, float *B, float *C) {
int i = threadIdx.x;
C[i] = A[i] + B[i];
}
size_t ind(int x, int y) {
return y * N + x;
}
int main() {
float A[N * N];
float B[N * N];
float C[N * N];
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
A[ind(j, i)] = 3.4;
B[ind(j, i)] = 5.4;
}
}
float *a;
float *b;
float *c;
cudaMalloc((void **) &a, N * N * sizeof(float));
cudaMalloc((void **) &b, N * N * sizeof(float));
cudaMalloc((void **) &c, N * N * sizeof(float));
cudaMemcpy(a, A, sizeof(*a) * N * N, cudaMemcpyHostToDevice);
cudaMemcpy(b, B, sizeof(*a) * N * N, cudaMemcpyHostToDevice);
cudaMemcpy(c, C, sizeof(*a) * N * N, cudaMemcpyHostToDevice);
int numBlocks = 1;
dim3 threadsPerBlock(N * N);
MatAdd<<<numBlocks, threadsPerBlock>>>(a, b, c);
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
printf("%f ", c[ind(j, i)]);
}
printf("\n");
}
cudaFree(a);
cudaFree(b);
cudaFree(c);
} | .file "tmpxft_001696ef_00000000-6_test_4.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z3indii
.type _Z3indii, @function
_Z3indii:
.LFB2057:
.cfi_startproc
endbr64
leal (%rsi,%rsi,4), %eax
leal (%rdi,%rax,8), %eax
cltq
ret
.cfi_endproc
.LFE2057:
.size _Z3indii, .-_Z3indii
.globl _Z29__device_stub__Z6MatAddPfS_S_PfS_S_
.type _Z29__device_stub__Z6MatAddPfS_S_PfS_S_, @function
_Z29__device_stub__Z6MatAddPfS_S_PfS_S_:
.LFB2083:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L8
.L4:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L9
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L8:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z6MatAddPfS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L4
.L9:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z29__device_stub__Z6MatAddPfS_S_PfS_S_, .-_Z29__device_stub__Z6MatAddPfS_S_PfS_S_
.globl _Z6MatAddPfS_S_
.type _Z6MatAddPfS_S_, @function
_Z6MatAddPfS_S_:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z29__device_stub__Z6MatAddPfS_S_PfS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z6MatAddPfS_S_, .-_Z6MatAddPfS_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC2:
.string "%f "
.LC3:
.string "\n"
.text
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
leaq -16384(%rsp), %r11
.cfi_def_cfa 11, 16424
.LPSRL0:
subq $4096, %rsp
orq $0, (%rsp)
cmpq %r11, %rsp
jne .LPSRL0
.cfi_def_cfa_register 7
subq $2888, %rsp
.cfi_def_cfa_offset 19312
movq %fs:40, %rax
movq %rax, 19256(%rsp)
xorl %eax, %eax
leaq 48(%rsp), %rax
leaq 6448(%rsp), %rdx
movq %rdx, %rcx
movss .LC0(%rip), %xmm1
movss .LC1(%rip), %xmm0
.L13:
movl $0, %ebx
.L14:
movss %xmm1, (%rax,%rbx)
movss %xmm0, (%rdx,%rbx)
addq $4, %rbx
cmpq $160, %rbx
jne .L14
addq $160, %rax
addq $160, %rdx
cmpq %rcx, %rax
jne .L13
movq %rsp, %rdi
movl $6400, %esi
call cudaMalloc@PLT
leaq 8(%rsp), %rdi
movl $6400, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $6400, %esi
call cudaMalloc@PLT
leaq 48(%rsp), %rsi
movl $1, %ecx
movl $6400, %edx
movq (%rsp), %rdi
call cudaMemcpy@PLT
leaq 6448(%rsp), %rsi
movl $1, %ecx
movl $6400, %edx
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
leaq 12848(%rsp), %rsi
movl $1, %ecx
movl $6400, %edx
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $1600, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 24(%rsp), %rdx
movl $1, %ecx
movq 36(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L24
.L16:
leaq .LC2(%rip), %r12
leaq .LC3(%rip), %r13
.L17:
leaq -160(%rbx), %rbp
.L18:
movq 16(%rsp), %rax
pxor %xmm0, %xmm0
cvtss2sd (%rax,%rbp), %xmm0
movq %r12, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbp
cmpq %rbx, %rbp
jne .L18
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $160, %rbx
cmpq $6560, %rbx
jne .L17
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 19256(%rsp), %rax
subq %fs:40, %rax
jne .L25
movl $0, %eax
addq $19272, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L24:
.cfi_restore_state
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movq (%rsp), %rdi
call _Z29__device_stub__Z6MatAddPfS_S_PfS_S_
jmp .L16
.L25:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size main, .-main
.section .rodata.str1.1
.LC4:
.string "_Z6MatAddPfS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z6MatAddPfS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1079613850
.align 4
.LC1:
.long 1085066445
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#define N 40
__global__ void MatAdd(float *A, float *B, float *C) {
int i = threadIdx.x;
C[i] = A[i] + B[i];
}
size_t ind(int x, int y) {
return y * N + x;
}
int main() {
float A[N * N];
float B[N * N];
float C[N * N];
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
A[ind(j, i)] = 3.4;
B[ind(j, i)] = 5.4;
}
}
float *a;
float *b;
float *c;
cudaMalloc((void **) &a, N * N * sizeof(float));
cudaMalloc((void **) &b, N * N * sizeof(float));
cudaMalloc((void **) &c, N * N * sizeof(float));
cudaMemcpy(a, A, sizeof(*a) * N * N, cudaMemcpyHostToDevice);
cudaMemcpy(b, B, sizeof(*a) * N * N, cudaMemcpyHostToDevice);
cudaMemcpy(c, C, sizeof(*a) * N * N, cudaMemcpyHostToDevice);
int numBlocks = 1;
dim3 threadsPerBlock(N * N);
MatAdd<<<numBlocks, threadsPerBlock>>>(a, b, c);
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
printf("%f ", c[ind(j, i)]);
}
printf("\n");
}
cudaFree(a);
cudaFree(b);
cudaFree(c);
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#define N 40
__global__ void MatAdd(float *A, float *B, float *C) {
int i = threadIdx.x;
C[i] = A[i] + B[i];
}
size_t ind(int x, int y) {
return y * N + x;
}
int main() {
float A[N * N];
float B[N * N];
float C[N * N];
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
A[ind(j, i)] = 3.4;
B[ind(j, i)] = 5.4;
}
}
float *a;
float *b;
float *c;
hipMalloc((void **) &a, N * N * sizeof(float));
hipMalloc((void **) &b, N * N * sizeof(float));
hipMalloc((void **) &c, N * N * sizeof(float));
hipMemcpy(a, A, sizeof(*a) * N * N, hipMemcpyHostToDevice);
hipMemcpy(b, B, sizeof(*a) * N * N, hipMemcpyHostToDevice);
hipMemcpy(c, C, sizeof(*a) * N * N, hipMemcpyHostToDevice);
int numBlocks = 1;
dim3 threadsPerBlock(N * N);
MatAdd<<<numBlocks, threadsPerBlock>>>(a, b, c);
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
printf("%f ", c[ind(j, i)]);
}
printf("\n");
}
hipFree(a);
hipFree(b);
hipFree(c);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#define N 40
__global__ void MatAdd(float *A, float *B, float *C) {
int i = threadIdx.x;
C[i] = A[i] + B[i];
}
size_t ind(int x, int y) {
return y * N + x;
}
int main() {
float A[N * N];
float B[N * N];
float C[N * N];
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
A[ind(j, i)] = 3.4;
B[ind(j, i)] = 5.4;
}
}
float *a;
float *b;
float *c;
hipMalloc((void **) &a, N * N * sizeof(float));
hipMalloc((void **) &b, N * N * sizeof(float));
hipMalloc((void **) &c, N * N * sizeof(float));
hipMemcpy(a, A, sizeof(*a) * N * N, hipMemcpyHostToDevice);
hipMemcpy(b, B, sizeof(*a) * N * N, hipMemcpyHostToDevice);
hipMemcpy(c, C, sizeof(*a) * N * N, hipMemcpyHostToDevice);
int numBlocks = 1;
dim3 threadsPerBlock(N * N);
MatAdd<<<numBlocks, threadsPerBlock>>>(a, b, c);
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
printf("%f ", c[ind(j, i)]);
}
printf("\n");
}
hipFree(a);
hipFree(b);
hipFree(c);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6MatAddPfS_S_
.globl _Z6MatAddPfS_S_
.p2align 8
.type _Z6MatAddPfS_S_,@function
_Z6MatAddPfS_S_:
s_load_b128 s[4:7], s[0:1], 0x0
v_lshlrev_b32_e32 v0, 2, v0
s_load_b64 s[0:1], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_clause 0x1
global_load_b32 v1, v0, s[4:5]
global_load_b32 v2, v0, s[6:7]
s_waitcnt vmcnt(0)
v_add_f32_e32 v1, v1, v2
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6MatAddPfS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 8
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6MatAddPfS_S_, .Lfunc_end0-_Z6MatAddPfS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6MatAddPfS_S_
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z6MatAddPfS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#define N 40
__global__ void MatAdd(float *A, float *B, float *C) {
int i = threadIdx.x;
C[i] = A[i] + B[i];
}
size_t ind(int x, int y) {
return y * N + x;
}
int main() {
float A[N * N];
float B[N * N];
float C[N * N];
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
A[ind(j, i)] = 3.4;
B[ind(j, i)] = 5.4;
}
}
float *a;
float *b;
float *c;
hipMalloc((void **) &a, N * N * sizeof(float));
hipMalloc((void **) &b, N * N * sizeof(float));
hipMalloc((void **) &c, N * N * sizeof(float));
hipMemcpy(a, A, sizeof(*a) * N * N, hipMemcpyHostToDevice);
hipMemcpy(b, B, sizeof(*a) * N * N, hipMemcpyHostToDevice);
hipMemcpy(c, C, sizeof(*a) * N * N, hipMemcpyHostToDevice);
int numBlocks = 1;
dim3 threadsPerBlock(N * N);
MatAdd<<<numBlocks, threadsPerBlock>>>(a, b, c);
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
printf("%f ", c[ind(j, i)]);
}
printf("\n");
}
hipFree(a);
hipFree(b);
hipFree(c);
} | .text
.file "test_4.hip"
.globl _Z21__device_stub__MatAddPfS_S_ # -- Begin function _Z21__device_stub__MatAddPfS_S_
.p2align 4, 0x90
.type _Z21__device_stub__MatAddPfS_S_,@function
_Z21__device_stub__MatAddPfS_S_: # @_Z21__device_stub__MatAddPfS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6MatAddPfS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z21__device_stub__MatAddPfS_S_, .Lfunc_end0-_Z21__device_stub__MatAddPfS_S_
.cfi_endproc
# -- End function
.globl _Z3indii # -- Begin function _Z3indii
.p2align 4, 0x90
.type _Z3indii,@function
_Z3indii: # @_Z3indii
.cfi_startproc
# %bb.0:
# kill: def $esi killed $esi def $rsi
# kill: def $edi killed $edi def $rdi
leal (%rsi,%rsi,4), %eax
leal (%rdi,%rax,8), %eax
cltq
retq
.Lfunc_end1:
.size _Z3indii, .Lfunc_end1-_Z3indii
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $19328, %rsp # imm = 0x4B80
.cfi_def_cfa_offset 19360
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
leaq 128(%rsp), %rax
leaq 6528(%rsp), %rcx
xorl %edx, %edx
.p2align 4, 0x90
.LBB2_1: # %.preheader24
# =>This Loop Header: Depth=1
# Child Loop BB2_2 Depth 2
xorl %esi, %esi
.p2align 4, 0x90
.LBB2_2: # Parent Loop BB2_1 Depth=1
# => This Inner Loop Header: Depth=2
movl $1079613850, (%rcx,%rsi,4) # imm = 0x4059999A
movl $1085066445, (%rax,%rsi,4) # imm = 0x40ACCCCD
incq %rsi
cmpq $40, %rsi
jne .LBB2_2
# %bb.3: # in Loop: Header=BB2_1 Depth=1
incq %rdx
addq $160, %rax
addq $160, %rcx
cmpq $40, %rdx
jne .LBB2_1
# %bb.4:
leaq 16(%rsp), %rdi
movl $6400, %esi # imm = 0x1900
callq hipMalloc
leaq 8(%rsp), %rdi
movl $6400, %esi # imm = 0x1900
callq hipMalloc
movq %rsp, %rdi
movl $6400, %esi # imm = 0x1900
callq hipMalloc
movq 16(%rsp), %rdi
leaq 6528(%rsp), %rsi
movl $6400, %edx # imm = 0x1900
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
leaq 128(%rsp), %rsi
movl $6400, %edx # imm = 0x1900
movl $1, %ecx
callq hipMemcpy
movq (%rsp), %rdi
leaq 12928(%rsp), %rsi
movl $6400, %edx # imm = 0x1900
movl $1, %ecx
callq hipMemcpy
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 1599(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_6
# %bb.5:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq (%rsp), %rdx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movq %rdx, 72(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z6MatAddPfS_S_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_6: # %.preheader.preheader
xorl %ebx, %ebx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB2_7: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB2_8 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB2_8: # Parent Loop BB2_7 Depth=1
# => This Inner Loop Header: Depth=2
movq (%rsp), %rax
addq %rbx, %rax
movss (%rax,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %edi
movb $1, %al
callq printf
incq %r15
cmpq $40, %r15
jne .LBB2_8
# %bb.9: # in Loop: Header=BB2_7 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r14
addq $160, %rbx
cmpq $40, %r14
jne .LBB2_7
# %bb.10:
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $19328, %rsp # imm = 0x4B80
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6MatAddPfS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6MatAddPfS_S_,@object # @_Z6MatAddPfS_S_
.section .rodata,"a",@progbits
.globl _Z6MatAddPfS_S_
.p2align 3, 0x0
_Z6MatAddPfS_S_:
.quad _Z21__device_stub__MatAddPfS_S_
.size _Z6MatAddPfS_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%f "
.size .L.str, 4
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z6MatAddPfS_S_"
.size .L__unnamed_1, 16
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__MatAddPfS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6MatAddPfS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z6MatAddPfS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x001fc800078e0207 */
/*0050*/ IMAD.WIDE R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0207 */
/*0060*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0070*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc800078e0207 */
/*0090*/ FADD R9, R2, R5 ; /* 0x0000000502097221 */
/* 0x004fca0000000000 */
/*00a0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6MatAddPfS_S_
.globl _Z6MatAddPfS_S_
.p2align 8
.type _Z6MatAddPfS_S_,@function
_Z6MatAddPfS_S_:
s_load_b128 s[4:7], s[0:1], 0x0
v_lshlrev_b32_e32 v0, 2, v0
s_load_b64 s[0:1], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_clause 0x1
global_load_b32 v1, v0, s[4:5]
global_load_b32 v2, v0, s[6:7]
s_waitcnt vmcnt(0)
v_add_f32_e32 v1, v1, v2
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6MatAddPfS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 8
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6MatAddPfS_S_, .Lfunc_end0-_Z6MatAddPfS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6MatAddPfS_S_
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z6MatAddPfS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_001696ef_00000000-6_test_4.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z3indii
.type _Z3indii, @function
_Z3indii:
.LFB2057:
.cfi_startproc
endbr64
leal (%rsi,%rsi,4), %eax
leal (%rdi,%rax,8), %eax
cltq
ret
.cfi_endproc
.LFE2057:
.size _Z3indii, .-_Z3indii
.globl _Z29__device_stub__Z6MatAddPfS_S_PfS_S_
.type _Z29__device_stub__Z6MatAddPfS_S_PfS_S_, @function
_Z29__device_stub__Z6MatAddPfS_S_PfS_S_:
.LFB2083:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L8
.L4:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L9
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L8:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z6MatAddPfS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L4
.L9:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z29__device_stub__Z6MatAddPfS_S_PfS_S_, .-_Z29__device_stub__Z6MatAddPfS_S_PfS_S_
.globl _Z6MatAddPfS_S_
.type _Z6MatAddPfS_S_, @function
_Z6MatAddPfS_S_:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z29__device_stub__Z6MatAddPfS_S_PfS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z6MatAddPfS_S_, .-_Z6MatAddPfS_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC2:
.string "%f "
.LC3:
.string "\n"
.text
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
leaq -16384(%rsp), %r11
.cfi_def_cfa 11, 16424
.LPSRL0:
subq $4096, %rsp
orq $0, (%rsp)
cmpq %r11, %rsp
jne .LPSRL0
.cfi_def_cfa_register 7
subq $2888, %rsp
.cfi_def_cfa_offset 19312
movq %fs:40, %rax
movq %rax, 19256(%rsp)
xorl %eax, %eax
leaq 48(%rsp), %rax
leaq 6448(%rsp), %rdx
movq %rdx, %rcx
movss .LC0(%rip), %xmm1
movss .LC1(%rip), %xmm0
.L13:
movl $0, %ebx
.L14:
movss %xmm1, (%rax,%rbx)
movss %xmm0, (%rdx,%rbx)
addq $4, %rbx
cmpq $160, %rbx
jne .L14
addq $160, %rax
addq $160, %rdx
cmpq %rcx, %rax
jne .L13
movq %rsp, %rdi
movl $6400, %esi
call cudaMalloc@PLT
leaq 8(%rsp), %rdi
movl $6400, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $6400, %esi
call cudaMalloc@PLT
leaq 48(%rsp), %rsi
movl $1, %ecx
movl $6400, %edx
movq (%rsp), %rdi
call cudaMemcpy@PLT
leaq 6448(%rsp), %rsi
movl $1, %ecx
movl $6400, %edx
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
leaq 12848(%rsp), %rsi
movl $1, %ecx
movl $6400, %edx
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $1600, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 24(%rsp), %rdx
movl $1, %ecx
movq 36(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L24
.L16:
leaq .LC2(%rip), %r12
leaq .LC3(%rip), %r13
.L17:
leaq -160(%rbx), %rbp
.L18:
movq 16(%rsp), %rax
pxor %xmm0, %xmm0
cvtss2sd (%rax,%rbp), %xmm0
movq %r12, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbp
cmpq %rbx, %rbp
jne .L18
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $160, %rbx
cmpq $6560, %rbx
jne .L17
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 19256(%rsp), %rax
subq %fs:40, %rax
jne .L25
movl $0, %eax
addq $19272, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L24:
.cfi_restore_state
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movq (%rsp), %rdi
call _Z29__device_stub__Z6MatAddPfS_S_PfS_S_
jmp .L16
.L25:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size main, .-main
.section .rodata.str1.1
.LC4:
.string "_Z6MatAddPfS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z6MatAddPfS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1079613850
.align 4
.LC1:
.long 1085066445
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "test_4.hip"
.globl _Z21__device_stub__MatAddPfS_S_ # -- Begin function _Z21__device_stub__MatAddPfS_S_
.p2align 4, 0x90
.type _Z21__device_stub__MatAddPfS_S_,@function
_Z21__device_stub__MatAddPfS_S_: # @_Z21__device_stub__MatAddPfS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6MatAddPfS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z21__device_stub__MatAddPfS_S_, .Lfunc_end0-_Z21__device_stub__MatAddPfS_S_
.cfi_endproc
# -- End function
.globl _Z3indii # -- Begin function _Z3indii
.p2align 4, 0x90
.type _Z3indii,@function
_Z3indii: # @_Z3indii
.cfi_startproc
# %bb.0:
# kill: def $esi killed $esi def $rsi
# kill: def $edi killed $edi def $rdi
leal (%rsi,%rsi,4), %eax
leal (%rdi,%rax,8), %eax
cltq
retq
.Lfunc_end1:
.size _Z3indii, .Lfunc_end1-_Z3indii
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $19328, %rsp # imm = 0x4B80
.cfi_def_cfa_offset 19360
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
leaq 128(%rsp), %rax
leaq 6528(%rsp), %rcx
xorl %edx, %edx
.p2align 4, 0x90
.LBB2_1: # %.preheader24
# =>This Loop Header: Depth=1
# Child Loop BB2_2 Depth 2
xorl %esi, %esi
.p2align 4, 0x90
.LBB2_2: # Parent Loop BB2_1 Depth=1
# => This Inner Loop Header: Depth=2
movl $1079613850, (%rcx,%rsi,4) # imm = 0x4059999A
movl $1085066445, (%rax,%rsi,4) # imm = 0x40ACCCCD
incq %rsi
cmpq $40, %rsi
jne .LBB2_2
# %bb.3: # in Loop: Header=BB2_1 Depth=1
incq %rdx
addq $160, %rax
addq $160, %rcx
cmpq $40, %rdx
jne .LBB2_1
# %bb.4:
leaq 16(%rsp), %rdi
movl $6400, %esi # imm = 0x1900
callq hipMalloc
leaq 8(%rsp), %rdi
movl $6400, %esi # imm = 0x1900
callq hipMalloc
movq %rsp, %rdi
movl $6400, %esi # imm = 0x1900
callq hipMalloc
movq 16(%rsp), %rdi
leaq 6528(%rsp), %rsi
movl $6400, %edx # imm = 0x1900
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
leaq 128(%rsp), %rsi
movl $6400, %edx # imm = 0x1900
movl $1, %ecx
callq hipMemcpy
movq (%rsp), %rdi
leaq 12928(%rsp), %rsi
movl $6400, %edx # imm = 0x1900
movl $1, %ecx
callq hipMemcpy
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 1599(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_6
# %bb.5:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq (%rsp), %rdx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movq %rdx, 72(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z6MatAddPfS_S_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_6: # %.preheader.preheader
xorl %ebx, %ebx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB2_7: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB2_8 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB2_8: # Parent Loop BB2_7 Depth=1
# => This Inner Loop Header: Depth=2
movq (%rsp), %rax
addq %rbx, %rax
movss (%rax,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %edi
movb $1, %al
callq printf
incq %r15
cmpq $40, %r15
jne .LBB2_8
# %bb.9: # in Loop: Header=BB2_7 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r14
addq $160, %rbx
cmpq $40, %r14
jne .LBB2_7
# %bb.10:
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $19328, %rsp # imm = 0x4B80
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6MatAddPfS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6MatAddPfS_S_,@object # @_Z6MatAddPfS_S_
.section .rodata,"a",@progbits
.globl _Z6MatAddPfS_S_
.p2align 3, 0x0
_Z6MatAddPfS_S_:
.quad _Z21__device_stub__MatAddPfS_S_
.size _Z6MatAddPfS_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%f "
.size .L.str, 4
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z6MatAddPfS_S_"
.size .L__unnamed_1, 16
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__MatAddPfS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6MatAddPfS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h> // Reference???
const int N = 1024; const int blocksize = 16;
__global__ void add_matrix( float *a, float *b, float *c, int N, float rf, float pirkplus1) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int index = i + j*N;
if ( i < N && j < N )
c[index] = rf*__sinf((float)(i+1)*(float)(j+1)*pirkplus1);
}
int main(void){
float *a = new float[N*N]; float *b = new float[N*N]; float *c = new float[N*N];
float two=2.0f, one=1.0f;
float pi,rkplus1,rf; // Generate square orthonormal matrices
pi = two * asin(one);
rkplus1 = one/(float(N) + one);
rf = sqrt(two*rkplus1);
for ( int i = 0; i < N*N; ++i ) {
a[i] = 1.0f; b[i] = 3.5f; }
float *ad, *bd, *cd;
const int size = N*N*sizeof(float);
cudaMalloc( (void**)&ad, size );
cudaMalloc( (void**)&bd, size );
cudaMalloc( (void**)&cd, size );
cudaMemcpy( ad, a, size, cudaMemcpyHostToDevice ); // COPY DATA TO GPU
cudaMemcpy( bd, b, size, cudaMemcpyHostToDevice );
dim3 dimBlock( blocksize, blocksize );
dim3 dimGrid( N/dimBlock.x, N/dimBlock.y );
add_matrix<<<dimGrid, dimBlock>>>( ad, bd, cd, N, rf, pi*rkplus1 );
cudaMemcpy( c, cd, size, cudaMemcpyDeviceToHost );
for (int i = 0; i < 10;i++) {
printf(" %7.5f", c[i]);
}
printf("\n");
cudaFree( ad ); cudaFree( bd ); cudaFree( cd ); // CLEAN UP, RETURN
return 0;
} | code for sm_80
Function : _Z10add_matrixPfS_S_iff
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */
/* 0x000e280000002600 */
/*0020*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e280000002200 */
/*0030*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e680000002500 */
/*0040*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e620000002100 */
/*0050*/ IMAD R3, R3, c[0x0][0x4], R2 ; /* 0x0000010003037a24 */
/* 0x001fca00078e0202 */
/*0060*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x178], PT ; /* 0x00005e0003007a0c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x002fca00078e0205 */
/*0080*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x178], P0 ; /* 0x00005e0000007a0c */
/* 0x000fda0000706670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ IADD3 R4, R0, 0x1, RZ ; /* 0x0000000100047810 */
/* 0x000fe20007ffe0ff */
/*00b0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*00c0*/ IADD3 R5, R3.reuse, 0x1, RZ ; /* 0x0000000103057810 */
/* 0x040fe20007ffe0ff */
/*00d0*/ IMAD R3, R3, c[0x0][0x178], R0 ; /* 0x00005e0003037a24 */
/* 0x000fc600078e0200 */
/*00e0*/ I2F R4, R4 ; /* 0x0000000400047306 */
/* 0x000ff00000201400 */
/*00f0*/ I2F R5, R5 ; /* 0x0000000500057306 */
/* 0x000e240000201400 */
/*0100*/ FMUL R2, R4, R5 ; /* 0x0000000504027220 */
/* 0x001fc80000400000 */
/*0110*/ FMUL R2, R2, c[0x0][0x180] ; /* 0x0000600002027a20 */
/* 0x000fc80000400000 */
/*0120*/ FMUL.RZ R6, R2, 0.15915493667125701904 ; /* 0x3e22f98302067820 */
/* 0x000fe2000040c000 */
/*0130*/ HFMA2.MMA R2, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff027435 */
/* 0x000fca00000001ff */
/*0140*/ MUFU.SIN R6, R6 ; /* 0x0000000600067308 */
/* 0x000e2a0000000400 */
/*0150*/ IMAD.WIDE R2, R3, R2, c[0x0][0x170] ; /* 0x00005c0003027625 */
/* 0x000fc800078e0202 */
/*0160*/ FMUL R7, R6, c[0x0][0x17c] ; /* 0x00005f0006077a20 */
/* 0x001fca0000400000 */
/*0170*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x000fe2000c101904 */
/*0180*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0190*/ BRA 0x190; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h> // Reference???
const int N = 1024; const int blocksize = 16;
__global__ void add_matrix( float *a, float *b, float *c, int N, float rf, float pirkplus1) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int index = i + j*N;
if ( i < N && j < N )
c[index] = rf*__sinf((float)(i+1)*(float)(j+1)*pirkplus1);
}
int main(void){
float *a = new float[N*N]; float *b = new float[N*N]; float *c = new float[N*N];
float two=2.0f, one=1.0f;
float pi,rkplus1,rf; // Generate square orthonormal matrices
pi = two * asin(one);
rkplus1 = one/(float(N) + one);
rf = sqrt(two*rkplus1);
for ( int i = 0; i < N*N; ++i ) {
a[i] = 1.0f; b[i] = 3.5f; }
float *ad, *bd, *cd;
const int size = N*N*sizeof(float);
cudaMalloc( (void**)&ad, size );
cudaMalloc( (void**)&bd, size );
cudaMalloc( (void**)&cd, size );
cudaMemcpy( ad, a, size, cudaMemcpyHostToDevice ); // COPY DATA TO GPU
cudaMemcpy( bd, b, size, cudaMemcpyHostToDevice );
dim3 dimBlock( blocksize, blocksize );
dim3 dimGrid( N/dimBlock.x, N/dimBlock.y );
add_matrix<<<dimGrid, dimBlock>>>( ad, bd, cd, N, rf, pi*rkplus1 );
cudaMemcpy( c, cd, size, cudaMemcpyDeviceToHost );
for (int i = 0; i < 10;i++) {
printf(" %7.5f", c[i]);
}
printf("\n");
cudaFree( ad ); cudaFree( bd ); cudaFree( cd ); // CLEAN UP, RETURN
return 0;
} | .file "tmpxft_0003d6b7_00000000-6_matorthog.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z37__device_stub__Z10add_matrixPfS_S_iffPfS_S_iff
.type _Z37__device_stub__Z10add_matrixPfS_S_iffPfS_S_iff, @function
_Z37__device_stub__Z10add_matrixPfS_S_iffPfS_S_iff:
.LFB2082:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movss %xmm0, 16(%rsp)
movss %xmm1, 12(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z10add_matrixPfS_S_iff(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z37__device_stub__Z10add_matrixPfS_S_iffPfS_S_iff, .-_Z37__device_stub__Z10add_matrixPfS_S_iffPfS_S_iff
.globl _Z10add_matrixPfS_S_iff
.type _Z10add_matrixPfS_S_iff, @function
_Z10add_matrixPfS_S_iff:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z37__device_stub__Z10add_matrixPfS_S_iffPfS_S_iff
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z10add_matrixPfS_S_iff, .-_Z10add_matrixPfS_S_iff
.section .rodata.str1.1,"aMS",@progbits,1
.LC4:
.string " %7.5f"
.LC5:
.string "\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $64, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl $4194304, %edi
call _Znam@PLT
movq %rax, %rbp
movl $4194304, %edi
call _Znam@PLT
movq %rax, %rbx
movl $4194304, %edi
call _Znam@PLT
movq %rax, %r12
movl $0, %eax
movss .LC0(%rip), %xmm1
movss .LC1(%rip), %xmm0
.L12:
movss %xmm1, 0(%rbp,%rax)
movss %xmm0, (%rbx,%rax)
addq $4, %rax
cmpq $4194304, %rax
jne .L12
leaq 8(%rsp), %rdi
movl $4194304, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $4194304, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $4194304, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $4194304, %edx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $4194304, %edx
movq %rbx, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $64, 44(%rsp)
movl $64, 48(%rsp)
movl $16, 32(%rsp)
movl $16, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 32(%rsp), %rdx
movl $1, %ecx
movq 44(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L19
.L13:
movl $2, %ecx
movl $4194304, %edx
movq 24(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movq %r12, %rbx
addq $40, %r12
leaq .LC4(%rip), %rbp
.L14:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %rbp, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %r12, %rbx
jne .L14
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L20
movl $0, %eax
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
movss .LC2(%rip), %xmm1
movss .LC3(%rip), %xmm0
movl $1024, %ecx
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z37__device_stub__Z10add_matrixPfS_S_iffPfS_S_iff
jmp .L13
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC6:
.string "_Z10add_matrixPfS_S_iff"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _Z10add_matrixPfS_S_iff(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1065353216
.align 4
.LC1:
.long 1080033280
.align 4
.LC2:
.long 994631076
.align 4
.LC3:
.long 1026879063
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h> // Reference???
const int N = 1024; const int blocksize = 16;
__global__ void add_matrix( float *a, float *b, float *c, int N, float rf, float pirkplus1) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int index = i + j*N;
if ( i < N && j < N )
c[index] = rf*__sinf((float)(i+1)*(float)(j+1)*pirkplus1);
}
int main(void){
float *a = new float[N*N]; float *b = new float[N*N]; float *c = new float[N*N];
float two=2.0f, one=1.0f;
float pi,rkplus1,rf; // Generate square orthonormal matrices
pi = two * asin(one);
rkplus1 = one/(float(N) + one);
rf = sqrt(two*rkplus1);
for ( int i = 0; i < N*N; ++i ) {
a[i] = 1.0f; b[i] = 3.5f; }
float *ad, *bd, *cd;
const int size = N*N*sizeof(float);
cudaMalloc( (void**)&ad, size );
cudaMalloc( (void**)&bd, size );
cudaMalloc( (void**)&cd, size );
cudaMemcpy( ad, a, size, cudaMemcpyHostToDevice ); // COPY DATA TO GPU
cudaMemcpy( bd, b, size, cudaMemcpyHostToDevice );
dim3 dimBlock( blocksize, blocksize );
dim3 dimGrid( N/dimBlock.x, N/dimBlock.y );
add_matrix<<<dimGrid, dimBlock>>>( ad, bd, cd, N, rf, pi*rkplus1 );
cudaMemcpy( c, cd, size, cudaMemcpyDeviceToHost );
for (int i = 0; i < 10;i++) {
printf(" %7.5f", c[i]);
}
printf("\n");
cudaFree( ad ); cudaFree( bd ); cudaFree( cd ); // CLEAN UP, RETURN
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h> // Reference???
const int N = 1024; const int blocksize = 16;
__global__ void add_matrix( float *a, float *b, float *c, int N, float rf, float pirkplus1) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int index = i + j*N;
if ( i < N && j < N )
c[index] = rf*__sinf((float)(i+1)*(float)(j+1)*pirkplus1);
}
int main(void){
float *a = new float[N*N]; float *b = new float[N*N]; float *c = new float[N*N];
float two=2.0f, one=1.0f;
float pi,rkplus1,rf; // Generate square orthonormal matrices
pi = two * asin(one);
rkplus1 = one/(float(N) + one);
rf = sqrt(two*rkplus1);
for ( int i = 0; i < N*N; ++i ) {
a[i] = 1.0f; b[i] = 3.5f; }
float *ad, *bd, *cd;
const int size = N*N*sizeof(float);
hipMalloc( (void**)&ad, size );
hipMalloc( (void**)&bd, size );
hipMalloc( (void**)&cd, size );
hipMemcpy( ad, a, size, hipMemcpyHostToDevice ); // COPY DATA TO GPU
hipMemcpy( bd, b, size, hipMemcpyHostToDevice );
dim3 dimBlock( blocksize, blocksize );
dim3 dimGrid( N/dimBlock.x, N/dimBlock.y );
add_matrix<<<dimGrid, dimBlock>>>( ad, bd, cd, N, rf, pi*rkplus1 );
hipMemcpy( c, cd, size, hipMemcpyDeviceToHost );
for (int i = 0; i < 10;i++) {
printf(" %7.5f", c[i]);
}
printf("\n");
hipFree( ad ); hipFree( bd ); hipFree( cd ); // CLEAN UP, RETURN
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h> // Reference???
const int N = 1024; const int blocksize = 16;
__global__ void add_matrix( float *a, float *b, float *c, int N, float rf, float pirkplus1) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int index = i + j*N;
if ( i < N && j < N )
c[index] = rf*__sinf((float)(i+1)*(float)(j+1)*pirkplus1);
}
int main(void){
float *a = new float[N*N]; float *b = new float[N*N]; float *c = new float[N*N];
float two=2.0f, one=1.0f;
float pi,rkplus1,rf; // Generate square orthonormal matrices
pi = two * asin(one);
rkplus1 = one/(float(N) + one);
rf = sqrt(two*rkplus1);
for ( int i = 0; i < N*N; ++i ) {
a[i] = 1.0f; b[i] = 3.5f; }
float *ad, *bd, *cd;
const int size = N*N*sizeof(float);
hipMalloc( (void**)&ad, size );
hipMalloc( (void**)&bd, size );
hipMalloc( (void**)&cd, size );
hipMemcpy( ad, a, size, hipMemcpyHostToDevice ); // COPY DATA TO GPU
hipMemcpy( bd, b, size, hipMemcpyHostToDevice );
dim3 dimBlock( blocksize, blocksize );
dim3 dimGrid( N/dimBlock.x, N/dimBlock.y );
add_matrix<<<dimGrid, dimBlock>>>( ad, bd, cd, N, rf, pi*rkplus1 );
hipMemcpy( c, cd, size, hipMemcpyDeviceToHost );
for (int i = 0; i < 10;i++) {
printf(" %7.5f", c[i]);
}
printf("\n");
hipFree( ad ); hipFree( bd ); hipFree( cd ); // CLEAN UP, RETURN
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z10add_matrixPfS_S_iff
.globl _Z10add_matrixPfS_S_iff
.p2align 8
.type _Z10add_matrixPfS_S_iff,@function
_Z10add_matrixPfS_S_iff:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x34
s_load_b32 s2, s[0:1], 0x18
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s3, 0xffff
s_lshr_b32 s3, s3, 16
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[0:1], null, s14, s4, v[2:3]
v_mad_u64_u32 v[1:2], null, s15, s3, v[3:4]
s_mov_b32 s3, exec_lo
v_max_i32_e32 v2, v0, v1
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s2, v2
s_cbranch_execz .LBB0_2
s_load_b64 s[4:5], s[0:1], 0x1c
v_add_nc_u32_e32 v2, 1, v0
v_add_nc_u32_e32 v3, 1, v1
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cvt_f32_i32_e32 v2, v2
v_cvt_f32_i32_e32 v3, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mul_f32_e32 v2, v2, v3
s_waitcnt lgkmcnt(0)
v_mul_f32_e32 v4, s5, v2
v_mad_u64_u32 v[2:3], null, v1, s2, v[0:1]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_f32_e32 v0, 0.15915494, v4
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sin_f32_e32 v4, v0
v_lshlrev_b64 v[0:1], 2, v[2:3]
s_delay_alu instid0(VALU_DEP_1)
v_add_co_u32 v0, vcc_lo, s0, v0
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v2, s4, v4
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10add_matrixPfS_S_iff
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z10add_matrixPfS_S_iff, .Lfunc_end0-_Z10add_matrixPfS_S_iff
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10add_matrixPfS_S_iff
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z10add_matrixPfS_S_iff.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h> // Reference???
const int N = 1024; const int blocksize = 16;
__global__ void add_matrix( float *a, float *b, float *c, int N, float rf, float pirkplus1) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int index = i + j*N;
if ( i < N && j < N )
c[index] = rf*__sinf((float)(i+1)*(float)(j+1)*pirkplus1);
}
int main(void){
float *a = new float[N*N]; float *b = new float[N*N]; float *c = new float[N*N];
float two=2.0f, one=1.0f;
float pi,rkplus1,rf; // Generate square orthonormal matrices
pi = two * asin(one);
rkplus1 = one/(float(N) + one);
rf = sqrt(two*rkplus1);
for ( int i = 0; i < N*N; ++i ) {
a[i] = 1.0f; b[i] = 3.5f; }
float *ad, *bd, *cd;
const int size = N*N*sizeof(float);
hipMalloc( (void**)&ad, size );
hipMalloc( (void**)&bd, size );
hipMalloc( (void**)&cd, size );
hipMemcpy( ad, a, size, hipMemcpyHostToDevice ); // COPY DATA TO GPU
hipMemcpy( bd, b, size, hipMemcpyHostToDevice );
dim3 dimBlock( blocksize, blocksize );
dim3 dimGrid( N/dimBlock.x, N/dimBlock.y );
add_matrix<<<dimGrid, dimBlock>>>( ad, bd, cd, N, rf, pi*rkplus1 );
hipMemcpy( c, cd, size, hipMemcpyDeviceToHost );
for (int i = 0; i < 10;i++) {
printf(" %7.5f", c[i]);
}
printf("\n");
hipFree( ad ); hipFree( bd ); hipFree( cd ); // CLEAN UP, RETURN
return 0;
} | .text
.file "matorthog.hip"
.globl _Z25__device_stub__add_matrixPfS_S_iff # -- Begin function _Z25__device_stub__add_matrixPfS_S_iff
.p2align 4, 0x90
.type _Z25__device_stub__add_matrixPfS_S_iff,@function
_Z25__device_stub__add_matrixPfS_S_iff: # @_Z25__device_stub__add_matrixPfS_S_iff
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movl %ecx, 20(%rsp)
movss %xmm0, 16(%rsp)
movss %xmm1, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 20(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z10add_matrixPfS_S_iff, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z25__device_stub__add_matrixPfS_S_iff, .Lfunc_end0-_Z25__device_stub__add_matrixPfS_S_iff
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $160, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $4194304, %edi # imm = 0x400000
callq _Znam
movq %rax, %r15
movl $4194304, %edi # imm = 0x400000
callq _Znam
movq %rax, %r14
movl $4194304, %edi # imm = 0x400000
callq _Znam
movq %rax, %rbx
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movl $1065353216, (%r15,%rax,4) # imm = 0x3F800000
movl $1080033280, (%r14,%rax,4) # imm = 0x40600000
incq %rax
cmpq $1048576, %rax # imm = 0x100000
jne .LBB1_1
# %bb.2:
leaq 16(%rsp), %rdi
movl $4194304, %esi # imm = 0x400000
callq hipMalloc
leaq 8(%rsp), %rdi
movl $4194304, %esi # imm = 0x400000
callq hipMalloc
movq %rsp, %rdi
movl $4194304, %esi # imm = 0x400000
callq hipMalloc
movq 16(%rsp), %rdi
movl $4194304, %edx # imm = 0x400000
movq %r15, %rsi
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
movl $4194304, %edx # imm = 0x400000
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movabsq $274877907008, %rdi # imm = 0x4000000040
movabsq $68719476752, %rdx # imm = 0x1000000010
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq (%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl $1024, 36(%rsp) # imm = 0x400
movl $1026879063, 32(%rsp) # imm = 0x3D34EE57
movl $994631076, 28(%rsp) # imm = 0x3B48DDA4
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 36(%rsp), %rax
movq %rax, 136(%rsp)
leaq 32(%rsp), %rax
movq %rax, 144(%rsp)
leaq 28(%rsp), %rax
movq %rax, 152(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z10add_matrixPfS_S_iff, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
movq (%rsp), %rsi
movl $4194304, %edx # imm = 0x400000
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB1_5: # =>This Inner Loop Header: Depth=1
movss (%rbx,%r14,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %edi
movb $1, %al
callq printf
incq %r14
cmpq $10, %r14
jne .LBB1_5
# %bb.6:
movl $10, %edi
callq putchar@PLT
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $160, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10add_matrixPfS_S_iff, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z10add_matrixPfS_S_iff,@object # @_Z10add_matrixPfS_S_iff
.section .rodata,"a",@progbits
.globl _Z10add_matrixPfS_S_iff
.p2align 3, 0x0
_Z10add_matrixPfS_S_iff:
.quad _Z25__device_stub__add_matrixPfS_S_iff
.size _Z10add_matrixPfS_S_iff, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz " %7.5f"
.size .L.str, 7
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z10add_matrixPfS_S_iff"
.size .L__unnamed_1, 24
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__add_matrixPfS_S_iff
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z10add_matrixPfS_S_iff
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z10add_matrixPfS_S_iff
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */
/* 0x000e280000002600 */
/*0020*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e280000002200 */
/*0030*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e680000002500 */
/*0040*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e620000002100 */
/*0050*/ IMAD R3, R3, c[0x0][0x4], R2 ; /* 0x0000010003037a24 */
/* 0x001fca00078e0202 */
/*0060*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x178], PT ; /* 0x00005e0003007a0c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x002fca00078e0205 */
/*0080*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x178], P0 ; /* 0x00005e0000007a0c */
/* 0x000fda0000706670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ IADD3 R4, R0, 0x1, RZ ; /* 0x0000000100047810 */
/* 0x000fe20007ffe0ff */
/*00b0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*00c0*/ IADD3 R5, R3.reuse, 0x1, RZ ; /* 0x0000000103057810 */
/* 0x040fe20007ffe0ff */
/*00d0*/ IMAD R3, R3, c[0x0][0x178], R0 ; /* 0x00005e0003037a24 */
/* 0x000fc600078e0200 */
/*00e0*/ I2F R4, R4 ; /* 0x0000000400047306 */
/* 0x000ff00000201400 */
/*00f0*/ I2F R5, R5 ; /* 0x0000000500057306 */
/* 0x000e240000201400 */
/*0100*/ FMUL R2, R4, R5 ; /* 0x0000000504027220 */
/* 0x001fc80000400000 */
/*0110*/ FMUL R2, R2, c[0x0][0x180] ; /* 0x0000600002027a20 */
/* 0x000fc80000400000 */
/*0120*/ FMUL.RZ R6, R2, 0.15915493667125701904 ; /* 0x3e22f98302067820 */
/* 0x000fe2000040c000 */
/*0130*/ HFMA2.MMA R2, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff027435 */
/* 0x000fca00000001ff */
/*0140*/ MUFU.SIN R6, R6 ; /* 0x0000000600067308 */
/* 0x000e2a0000000400 */
/*0150*/ IMAD.WIDE R2, R3, R2, c[0x0][0x170] ; /* 0x00005c0003027625 */
/* 0x000fc800078e0202 */
/*0160*/ FMUL R7, R6, c[0x0][0x17c] ; /* 0x00005f0006077a20 */
/* 0x001fca0000400000 */
/*0170*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x000fe2000c101904 */
/*0180*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0190*/ BRA 0x190; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z10add_matrixPfS_S_iff
.globl _Z10add_matrixPfS_S_iff
.p2align 8
.type _Z10add_matrixPfS_S_iff,@function
_Z10add_matrixPfS_S_iff:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x34
s_load_b32 s2, s[0:1], 0x18
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s3, 0xffff
s_lshr_b32 s3, s3, 16
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[0:1], null, s14, s4, v[2:3]
v_mad_u64_u32 v[1:2], null, s15, s3, v[3:4]
s_mov_b32 s3, exec_lo
v_max_i32_e32 v2, v0, v1
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s2, v2
s_cbranch_execz .LBB0_2
s_load_b64 s[4:5], s[0:1], 0x1c
v_add_nc_u32_e32 v2, 1, v0
v_add_nc_u32_e32 v3, 1, v1
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cvt_f32_i32_e32 v2, v2
v_cvt_f32_i32_e32 v3, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mul_f32_e32 v2, v2, v3
s_waitcnt lgkmcnt(0)
v_mul_f32_e32 v4, s5, v2
v_mad_u64_u32 v[2:3], null, v1, s2, v[0:1]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_f32_e32 v0, 0.15915494, v4
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sin_f32_e32 v4, v0
v_lshlrev_b64 v[0:1], 2, v[2:3]
s_delay_alu instid0(VALU_DEP_1)
v_add_co_u32 v0, vcc_lo, s0, v0
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v2, s4, v4
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10add_matrixPfS_S_iff
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z10add_matrixPfS_S_iff, .Lfunc_end0-_Z10add_matrixPfS_S_iff
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10add_matrixPfS_S_iff
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z10add_matrixPfS_S_iff.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0003d6b7_00000000-6_matorthog.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z37__device_stub__Z10add_matrixPfS_S_iffPfS_S_iff
.type _Z37__device_stub__Z10add_matrixPfS_S_iffPfS_S_iff, @function
_Z37__device_stub__Z10add_matrixPfS_S_iffPfS_S_iff:
.LFB2082:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movss %xmm0, 16(%rsp)
movss %xmm1, 12(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z10add_matrixPfS_S_iff(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z37__device_stub__Z10add_matrixPfS_S_iffPfS_S_iff, .-_Z37__device_stub__Z10add_matrixPfS_S_iffPfS_S_iff
.globl _Z10add_matrixPfS_S_iff
.type _Z10add_matrixPfS_S_iff, @function
_Z10add_matrixPfS_S_iff:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z37__device_stub__Z10add_matrixPfS_S_iffPfS_S_iff
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z10add_matrixPfS_S_iff, .-_Z10add_matrixPfS_S_iff
.section .rodata.str1.1,"aMS",@progbits,1
.LC4:
.string " %7.5f"
.LC5:
.string "\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $64, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl $4194304, %edi
call _Znam@PLT
movq %rax, %rbp
movl $4194304, %edi
call _Znam@PLT
movq %rax, %rbx
movl $4194304, %edi
call _Znam@PLT
movq %rax, %r12
movl $0, %eax
movss .LC0(%rip), %xmm1
movss .LC1(%rip), %xmm0
.L12:
movss %xmm1, 0(%rbp,%rax)
movss %xmm0, (%rbx,%rax)
addq $4, %rax
cmpq $4194304, %rax
jne .L12
leaq 8(%rsp), %rdi
movl $4194304, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $4194304, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $4194304, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $4194304, %edx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $4194304, %edx
movq %rbx, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $64, 44(%rsp)
movl $64, 48(%rsp)
movl $16, 32(%rsp)
movl $16, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 32(%rsp), %rdx
movl $1, %ecx
movq 44(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L19
.L13:
movl $2, %ecx
movl $4194304, %edx
movq 24(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movq %r12, %rbx
addq $40, %r12
leaq .LC4(%rip), %rbp
.L14:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %rbp, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %r12, %rbx
jne .L14
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L20
movl $0, %eax
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
movss .LC2(%rip), %xmm1
movss .LC3(%rip), %xmm0
movl $1024, %ecx
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z37__device_stub__Z10add_matrixPfS_S_iffPfS_S_iff
jmp .L13
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC6:
.string "_Z10add_matrixPfS_S_iff"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _Z10add_matrixPfS_S_iff(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1065353216
.align 4
.LC1:
.long 1080033280
.align 4
.LC2:
.long 994631076
.align 4
.LC3:
.long 1026879063
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "matorthog.hip"
.globl _Z25__device_stub__add_matrixPfS_S_iff # -- Begin function _Z25__device_stub__add_matrixPfS_S_iff
.p2align 4, 0x90
.type _Z25__device_stub__add_matrixPfS_S_iff,@function
_Z25__device_stub__add_matrixPfS_S_iff: # @_Z25__device_stub__add_matrixPfS_S_iff
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movl %ecx, 20(%rsp)
movss %xmm0, 16(%rsp)
movss %xmm1, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 20(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z10add_matrixPfS_S_iff, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z25__device_stub__add_matrixPfS_S_iff, .Lfunc_end0-_Z25__device_stub__add_matrixPfS_S_iff
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $160, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $4194304, %edi # imm = 0x400000
callq _Znam
movq %rax, %r15
movl $4194304, %edi # imm = 0x400000
callq _Znam
movq %rax, %r14
movl $4194304, %edi # imm = 0x400000
callq _Znam
movq %rax, %rbx
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movl $1065353216, (%r15,%rax,4) # imm = 0x3F800000
movl $1080033280, (%r14,%rax,4) # imm = 0x40600000
incq %rax
cmpq $1048576, %rax # imm = 0x100000
jne .LBB1_1
# %bb.2:
leaq 16(%rsp), %rdi
movl $4194304, %esi # imm = 0x400000
callq hipMalloc
leaq 8(%rsp), %rdi
movl $4194304, %esi # imm = 0x400000
callq hipMalloc
movq %rsp, %rdi
movl $4194304, %esi # imm = 0x400000
callq hipMalloc
movq 16(%rsp), %rdi
movl $4194304, %edx # imm = 0x400000
movq %r15, %rsi
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
movl $4194304, %edx # imm = 0x400000
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movabsq $274877907008, %rdi # imm = 0x4000000040
movabsq $68719476752, %rdx # imm = 0x1000000010
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq (%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl $1024, 36(%rsp) # imm = 0x400
movl $1026879063, 32(%rsp) # imm = 0x3D34EE57
movl $994631076, 28(%rsp) # imm = 0x3B48DDA4
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 36(%rsp), %rax
movq %rax, 136(%rsp)
leaq 32(%rsp), %rax
movq %rax, 144(%rsp)
leaq 28(%rsp), %rax
movq %rax, 152(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z10add_matrixPfS_S_iff, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
movq (%rsp), %rsi
movl $4194304, %edx # imm = 0x400000
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB1_5: # =>This Inner Loop Header: Depth=1
movss (%rbx,%r14,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %edi
movb $1, %al
callq printf
incq %r14
cmpq $10, %r14
jne .LBB1_5
# %bb.6:
movl $10, %edi
callq putchar@PLT
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $160, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10add_matrixPfS_S_iff, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z10add_matrixPfS_S_iff,@object # @_Z10add_matrixPfS_S_iff
.section .rodata,"a",@progbits
.globl _Z10add_matrixPfS_S_iff
.p2align 3, 0x0
_Z10add_matrixPfS_S_iff:
.quad _Z25__device_stub__add_matrixPfS_S_iff
.size _Z10add_matrixPfS_S_iff, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz " %7.5f"
.size .L.str, 7
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z10add_matrixPfS_S_iff"
.size .L__unnamed_1, 24
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__add_matrixPfS_S_iff
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z10add_matrixPfS_S_iff
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /**
* 3mm.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <sgrauerg@gmail.com>
* Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <cuda.h>
#define GPU_DEVICE 0
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
/* Problem size. */
//# define NI 512
//# define NJ 512
//# define NK 512
//# define NL 512
//# define NM 512
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 32
#define DIM_THREAD_BLOCK_Y 8
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
int NI;
int NJ;
int NK;
int NL;
int NM;
/* Fill the four input matrices with deterministic index-derived values.
 * Matrix extents come from the global problem sizes NI/NJ/NK/NL/NM
 * (set in main from argv); divisors are those same globals, so values
 * stay in a small range regardless of problem size. */
void init_array(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D)
{
	/* A is NI x NK: A[i][j] = i*j / NI */
	for (int i = 0; i < NI; i++)
	{
		for (int j = 0; j < NK; j++)
		{
			A[i*NK + j] = ((DATA_TYPE) i*j) / NI;
		}
	}

	/* B is NK x NJ: B[i][j] = i*(j+1) / NJ */
	for (int i = 0; i < NK; i++)
	{
		for (int j = 0; j < NJ; j++)
		{
			B[i*NJ + j] = ((DATA_TYPE) i*(j+1)) / NJ;
		}
	}

	/* C is NJ x NM: C[i][j] = i*(j+3) / NL */
	for (int i = 0; i < NJ; i++)
	{
		for (int j = 0; j < NM; j++)
		{
			C[i*NM + j] = ((DATA_TYPE) i*(j+3)) / NL;
		}
	}

	/* D is NM x NL: D[i][j] = i*(j+2) / NK */
	for (int i = 0; i < NM; i++)
	{
		for (int j = 0; j < NL; j++)
		{
			D[i*NL + j] = ((DATA_TYPE) i*(j+2)) / NK;
		}
	}
}
/* E += A * B  (E: NI x NJ, A: NI x NK, B: NK x NJ).
 * One thread per output element; launched on a 2D grid where
 * x indexes columns (j) and y indexes rows (i); out-of-range
 * threads at the grid tail are guarded out.
 * Fix: accumulate the dot product in a register instead of doing a
 * global read-modify-write of E[i*NJ+j] on every k iteration — the
 * output address is loop-invariant, so one load and one store suffice. */
__global__ void mm3_kernel1(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *E, int NI, int NJ, int NK, int NL, int NM)
{
	int j = blockIdx.x * blockDim.x + threadIdx.x;
	int i = blockIdx.y * blockDim.y + threadIdx.y;

	if ((i < NI) && (j < NJ))
	{
		/* Start from the existing value to preserve the += semantics. */
		DATA_TYPE sum = E[i * NJ + j];
		for (int k = 0; k < NK; k++)
		{
			sum += A[i * NK + k] * B[k * NJ + j];
		}
		E[i * NJ + j] = sum;
	}
}
/* F += C * D  (F: NJ x NL, C: NJ x NM, D: NM x NL).
 * One thread per output element; x indexes columns (j), y indexes
 * rows (i); the bounds guard handles grids that overshoot the matrix.
 * Fix: accumulate the dot product in a register instead of doing a
 * global read-modify-write of F[i*NL+j] on every k iteration — the
 * output address is loop-invariant, so one load and one store suffice. */
__global__ void mm3_kernel2(DATA_TYPE *C, DATA_TYPE *D, DATA_TYPE *F, int NI, int NJ, int NK, int NL, int NM)
{
	int j = blockIdx.x * blockDim.x + threadIdx.x;
	int i = blockIdx.y * blockDim.y + threadIdx.y;

	if ((i < NJ) && (j < NL))
	{
		/* Start from the existing value to preserve the += semantics. */
		DATA_TYPE sum = F[i * NL + j];
		for (int k = 0; k < NM; k++)
		{
			sum += C[i * NM + k] * D[k * NL + j];
		}
		F[i * NL + j] = sum;
	}
}
/* G += E * F  (G: NI x NL, E: NI x NJ, F: NJ x NL) — the final product
 * of the 3mm chain. One thread per output element; x indexes columns
 * (j), y indexes rows (i); out-of-range threads are guarded out.
 * Fix: accumulate the dot product in a register instead of doing a
 * global read-modify-write of G[i*NL+j] on every k iteration — the
 * output address is loop-invariant, so one load and one store suffice. */
__global__ void mm3_kernel3(DATA_TYPE *E, DATA_TYPE *F, DATA_TYPE *G, int NI, int NJ, int NK, int NL, int NM)
{
	int j = blockIdx.x * blockDim.x + threadIdx.x;
	int i = blockIdx.y * blockDim.y + threadIdx.y;

	if ((i < NI) && (j < NL))
	{
		/* Start from the existing value to preserve the += semantics. */
		DATA_TYPE sum = G[i * NL + j];
		for (int k = 0; k < NJ; k++)
		{
			sum += E[i * NJ + k] * F[k * NL + j];
		}
		G[i * NL + j] = sum;
	}
}
/* Runs the three-kernel 3mm chain E=A*B, F=C*D, G=E*F on the device and
 * prints the elapsed GPU time (ms) to stdout.
 * Parameters: device-accessible matrix pointers and the five problem
 * sizes (the int parameters shadow the file-level globals of the same
 * names — all uses below refer to the parameters).
 * Each grid is sized by ceil-division so partial blocks cover the tail;
 * kernels bounds-check internally.
 * Fix: the two cudaEvent_t objects were created but never destroyed,
 * leaking a CUDA resource per call — cudaEventDestroy added.
 * NOTE(review): CUDA API return codes are still unchecked here, matching
 * the original benchmark style. */
void mm3Cuda(DATA_TYPE* A_gpu, DATA_TYPE* B_gpu, DATA_TYPE* C_gpu, DATA_TYPE* D_gpu, DATA_TYPE* E_gpu, DATA_TYPE* F_gpu,
		DATA_TYPE* G_gpu, int NI, int NJ, int NK, int NL, int NM)
{
	cudaEvent_t start, end;
	float time;

	dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
	dim3 grid1((size_t)(ceil( ((float)NJ) / ((float)DIM_THREAD_BLOCK_X) )),(size_t)(ceil((float)NI/ ((float)DIM_THREAD_BLOCK_Y) )));
	dim3 grid2((size_t)(ceil( ((float)NL) / ((float)DIM_THREAD_BLOCK_X) )),(size_t)(ceil((float)NJ/ ((float)DIM_THREAD_BLOCK_Y) )));
	dim3 grid3((size_t)(ceil( ((float)NL) / ((float)DIM_THREAD_BLOCK_X) )),(size_t)(ceil((float)NI/ ((float)DIM_THREAD_BLOCK_Y) )));

	cudaEventCreate(&start);
	cudaEventCreate(&end);

	cudaEventRecord(start);
	mm3_kernel1<<<grid1,block>>>(A_gpu, B_gpu, E_gpu, NI, NJ, NK, NL, NM);
	cudaDeviceSynchronize();
	mm3_kernel2<<<grid2,block>>>(C_gpu, D_gpu, F_gpu, NI, NJ, NK, NL, NM);
	cudaDeviceSynchronize();
	mm3_kernel3<<<grid3,block>>>(E_gpu, F_gpu, G_gpu, NI, NJ, NK, NL, NM);
	cudaDeviceSynchronize();
	cudaEventRecord(end);
	cudaEventSynchronize(end);

	cudaEventElapsedTime(&time, start, end);
	fprintf(stdout, "%0.6lf\n", time);

	/* Release the timing events (previously leaked). */
	cudaEventDestroy(start);
	cudaEventDestroy(end);
}
/* Entry point: parses the square problem size from argv[1], allocates
 * the seven matrices in unified memory, initializes inputs, runs the
 * 3mm benchmark, and frees everything.
 * Fixes:
 *  - E, F and G are consumed with "+=" by the kernels but were never
 *    zero-initialized; cudaMallocManaged does not guarantee zeroed
 *    memory, so results depended on allocation garbage. cudaMemset
 *    to 0 added after each output allocation.
 *  - argv[1] is parsed once and validated as positive (atoi returns 0
 *    on non-numeric input, which previously produced zero-sized
 *    allocations and empty grids). */
int main(int argc, char** argv)
{
	if (argc < 2) {
		printf("please no troll\n");
		return 1;
	}

	int n = atoi(argv[1]);
	if (n <= 0) {
		printf("please no troll\n");
		return 1;
	}

	/* The benchmark uses a square configuration: all five extents equal. */
	NI = n;
	NJ = n;
	NK = n;
	NL = n;
	NM = n;

	DATA_TYPE* A;
	DATA_TYPE* B;
	DATA_TYPE* C;
	DATA_TYPE* D;
	DATA_TYPE* E;
	DATA_TYPE* F;
	DATA_TYPE* G;

	cudaMallocManaged(&A, NI*NK*sizeof(DATA_TYPE));
	cudaMallocManaged(&B, NK*NJ*sizeof(DATA_TYPE));
	cudaMallocManaged(&C, NJ*NM*sizeof(DATA_TYPE));
	cudaMallocManaged(&D, NM*NL*sizeof(DATA_TYPE));
	cudaMallocManaged(&E, NI*NJ*sizeof(DATA_TYPE));
	cudaMallocManaged(&F, NJ*NL*sizeof(DATA_TYPE));
	cudaMallocManaged(&G, NI*NL*sizeof(DATA_TYPE));

	/* The kernels accumulate with "+=", so the output matrices must
	 * start at zero — managed allocations are not guaranteed zeroed. */
	cudaMemset(E, 0, NI*NJ*sizeof(DATA_TYPE));
	cudaMemset(F, 0, NJ*NL*sizeof(DATA_TYPE));
	cudaMemset(G, 0, NI*NL*sizeof(DATA_TYPE));

	init_array(A, B, C, D);

	mm3Cuda(A, B, C, D, E, F, G, NI, NJ, NK, NL, NM);

	cudaFree(A);
	cudaFree(B);
	cudaFree(C);
	cudaFree(D);
	cudaFree(E);
	cudaFree(F);
	cudaFree(G);

	return 0;
}
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2075:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2075:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z10init_arrayPfS_S_S_
.type _Z10init_arrayPfS_S_S_, @function
_Z10init_arrayPfS_S_S_:
.LFB2070:
.cfi_startproc
endbr64
movq %rdi, %r8
movq %rsi, %rdi
movq %rdx, %rsi
movq %rcx, %rdx
movl $0, %r9d
cmpl $0, NI(%rip)
jg .L4
.L5:
movl $0, %r10d
cmpl $0, NK(%rip)
jg .L8
.L9:
movl $0, %r8d
cmpl $0, NJ(%rip)
jg .L12
.L13:
movl $0, %edi
cmpl $0, NM(%rip)
jg .L16
ret
.L6:
imull %r9d, %eax
addl %ecx, %eax
cltq
pxor %xmm0, %xmm0
cvtsi2ssl %ecx, %xmm0
mulss %xmm2, %xmm0
pxor %xmm1, %xmm1
cvtsi2ssl NI(%rip), %xmm1
divss %xmm1, %xmm0
movss %xmm0, (%r8,%rax,4)
addl $1, %ecx
movl NK(%rip), %eax
cmpl %ecx, %eax
jg .L6
.L7:
addl $1, %r9d
cmpl %r9d, NI(%rip)
jle .L5
.L4:
movl NK(%rip), %eax
movl $0, %ecx
testl %eax, %eax
jle .L7
pxor %xmm2, %xmm2
cvtsi2ssl %r9d, %xmm2
jmp .L6
.L10:
movl %ecx, %r9d
addl $1, %ecx
movl %r8d, %eax
imull %r10d, %eax
addl %r9d, %eax
cltq
pxor %xmm0, %xmm0
cvtsi2ssl %ecx, %xmm0
mulss %xmm2, %xmm0
pxor %xmm1, %xmm1
cvtsi2ssl %r8d, %xmm1
divss %xmm1, %xmm0
movss %xmm0, (%rdi,%rax,4)
movl NJ(%rip), %r8d
cmpl %r8d, %ecx
jl .L10
.L11:
addl $1, %r10d
cmpl %r10d, NK(%rip)
jle .L9
.L8:
movl NJ(%rip), %r8d
movl $0, %ecx
testl %r8d, %r8d
jle .L11
pxor %xmm2, %xmm2
cvtsi2ssl %r10d, %xmm2
jmp .L10
.L14:
imull %r8d, %eax
addl %ecx, %eax
cltq
leal 3(%rcx), %edi
pxor %xmm0, %xmm0
cvtsi2ssl %edi, %xmm0
mulss %xmm2, %xmm0
pxor %xmm1, %xmm1
cvtsi2ssl NL(%rip), %xmm1
divss %xmm1, %xmm0
movss %xmm0, (%rsi,%rax,4)
addl $1, %ecx
movl NM(%rip), %eax
cmpl %ecx, %eax
jg .L14
.L15:
addl $1, %r8d
cmpl %r8d, NJ(%rip)
jle .L13
.L12:
movl NM(%rip), %eax
movl $0, %ecx
testl %eax, %eax
jle .L15
pxor %xmm2, %xmm2
cvtsi2ssl %r8d, %xmm2
jmp .L14
.L18:
imull %edi, %eax
addl %ecx, %eax
cltq
leal 2(%rcx), %esi
pxor %xmm0, %xmm0
cvtsi2ssl %esi, %xmm0
mulss %xmm2, %xmm0
pxor %xmm1, %xmm1
cvtsi2ssl NK(%rip), %xmm1
divss %xmm1, %xmm0
movss %xmm0, (%rdx,%rax,4)
addl $1, %ecx
movl NL(%rip), %eax
cmpl %ecx, %eax
jg .L18
.L19:
addl $1, %edi
cmpl %edi, NM(%rip)
jle .L3
.L16:
movl NL(%rip), %eax
movl $0, %ecx
testl %eax, %eax
jle .L19
pxor %xmm2, %xmm2
cvtsi2ssl %edi, %xmm2
jmp .L18
.L3:
ret
.cfi_endproc
.LFE2070:
.size _Z10init_arrayPfS_S_S_, .-_Z10init_arrayPfS_S_S_
.globl _Z40__device_stub__Z11mm3_kernel1PfS_S_iiiiiPfS_S_iiiii
.type _Z40__device_stub__Z11mm3_kernel1PfS_S_iiiiiPfS_S_iiiii, @function
_Z40__device_stub__Z11mm3_kernel1PfS_S_iiiiiPfS_S_iiiii:
.LFB2097:
.cfi_startproc
endbr64
subq $200, %rsp
.cfi_def_cfa_offset 208
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
leaq 208(%rsp), %rax
movq %rax, 160(%rsp)
leaq 216(%rsp), %rax
movq %rax, 168(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L44
.L40:
movq 184(%rsp), %rax
subq %fs:40, %rax
jne .L45
addq $200, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L44:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 216
pushq 56(%rsp)
.cfi_def_cfa_offset 224
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z11mm3_kernel1PfS_S_iiiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 208
jmp .L40
.L45:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2097:
.size _Z40__device_stub__Z11mm3_kernel1PfS_S_iiiiiPfS_S_iiiii, .-_Z40__device_stub__Z11mm3_kernel1PfS_S_iiiiiPfS_S_iiiii
.globl _Z11mm3_kernel1PfS_S_iiiii
.type _Z11mm3_kernel1PfS_S_iiiii, @function
_Z11mm3_kernel1PfS_S_iiiii:
.LFB2098:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z40__device_stub__Z11mm3_kernel1PfS_S_iiiiiPfS_S_iiiii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2098:
.size _Z11mm3_kernel1PfS_S_iiiii, .-_Z11mm3_kernel1PfS_S_iiiii
.globl _Z40__device_stub__Z11mm3_kernel2PfS_S_iiiiiPfS_S_iiiii
.type _Z40__device_stub__Z11mm3_kernel2PfS_S_iiiiiPfS_S_iiiii, @function
_Z40__device_stub__Z11mm3_kernel2PfS_S_iiiiiPfS_S_iiiii:
.LFB2099:
.cfi_startproc
endbr64
subq $200, %rsp
.cfi_def_cfa_offset 208
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
leaq 208(%rsp), %rax
movq %rax, 160(%rsp)
leaq 216(%rsp), %rax
movq %rax, 168(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L52
.L48:
movq 184(%rsp), %rax
subq %fs:40, %rax
jne .L53
addq $200, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L52:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 216
pushq 56(%rsp)
.cfi_def_cfa_offset 224
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z11mm3_kernel2PfS_S_iiiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 208
jmp .L48
.L53:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2099:
.size _Z40__device_stub__Z11mm3_kernel2PfS_S_iiiiiPfS_S_iiiii, .-_Z40__device_stub__Z11mm3_kernel2PfS_S_iiiiiPfS_S_iiiii
.globl _Z11mm3_kernel2PfS_S_iiiii
.type _Z11mm3_kernel2PfS_S_iiiii, @function
_Z11mm3_kernel2PfS_S_iiiii:
.LFB2100:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z40__device_stub__Z11mm3_kernel2PfS_S_iiiiiPfS_S_iiiii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2100:
.size _Z11mm3_kernel2PfS_S_iiiii, .-_Z11mm3_kernel2PfS_S_iiiii
.globl _Z40__device_stub__Z11mm3_kernel3PfS_S_iiiiiPfS_S_iiiii
.type _Z40__device_stub__Z11mm3_kernel3PfS_S_iiiiiPfS_S_iiiii, @function
_Z40__device_stub__Z11mm3_kernel3PfS_S_iiiiiPfS_S_iiiii:
.LFB2101:
.cfi_startproc
endbr64
subq $200, %rsp
.cfi_def_cfa_offset 208
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
leaq 208(%rsp), %rax
movq %rax, 160(%rsp)
leaq 216(%rsp), %rax
movq %rax, 168(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L60
.L56:
movq 184(%rsp), %rax
subq %fs:40, %rax
jne .L61
addq $200, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L60:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 216
pushq 56(%rsp)
.cfi_def_cfa_offset 224
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z11mm3_kernel3PfS_S_iiiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 208
jmp .L56
.L61:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2101:
.size _Z40__device_stub__Z11mm3_kernel3PfS_S_iiiiiPfS_S_iiiii, .-_Z40__device_stub__Z11mm3_kernel3PfS_S_iiiiiPfS_S_iiiii
.globl _Z11mm3_kernel3PfS_S_iiiii
.type _Z11mm3_kernel3PfS_S_iiiii, @function
_Z11mm3_kernel3PfS_S_iiiii:
.LFB2102:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z40__device_stub__Z11mm3_kernel3PfS_S_iiiiiPfS_S_iiiii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2102:
.size _Z11mm3_kernel3PfS_S_iiiii, .-_Z11mm3_kernel3PfS_S_iiiii
.section .rodata.str1.1,"aMS",@progbits,1
.LC6:
.string "%0.6lf\n"
.text
.globl _Z7mm3CudaPfS_S_S_S_S_S_iiiii
.type _Z7mm3CudaPfS_S_S_S_S_S_iiiii, @function
_Z7mm3CudaPfS_S_S_S_S_S_iiiii:
.LFB2071:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $104, %rsp
.cfi_def_cfa_offset 160
movq %rdi, %r13
movq %rsi, %r14
movq %rdx, 8(%rsp)
movq %rcx, %r12
movq %r8, %rbx
movq %r9, %rbp
movq 160(%rsp), %r15
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
movl $32, 40(%rsp)
movl $8, 44(%rsp)
movl $1, 48(%rsp)
pxor %xmm0, %xmm0
cvtsi2ssl 168(%rsp), %xmm0
mulss .LC0(%rip), %xmm0
movaps %xmm0, %xmm1
movss .LC7(%rip), %xmm3
movaps %xmm0, %xmm2
andps %xmm3, %xmm2
movss .LC1(%rip), %xmm4
ucomiss %xmm2, %xmm4
jbe .L65
cvttss2sil %xmm0, %eax
pxor %xmm2, %xmm2
cvtsi2ssl %eax, %xmm2
cmpnless %xmm2, %xmm1
movss .LC3(%rip), %xmm4
andps %xmm4, %xmm1
addss %xmm2, %xmm1
andnps %xmm0, %xmm3
orps %xmm3, %xmm1
.L65:
comiss .LC4(%rip), %xmm1
jnb .L66
cvttss2siq %xmm1, %rax
.L67:
pxor %xmm0, %xmm0
cvtsi2ssl 176(%rsp), %xmm0
movaps %xmm0, %xmm3
mulss .LC5(%rip), %xmm3
movaps %xmm3, %xmm1
movss .LC7(%rip), %xmm4
movaps %xmm3, %xmm2
andps %xmm4, %xmm2
movss .LC1(%rip), %xmm5
ucomiss %xmm2, %xmm5
jbe .L68
cvttss2sil %xmm3, %edx
pxor %xmm2, %xmm2
cvtsi2ssl %edx, %xmm2
cmpnless %xmm2, %xmm1
movss .LC3(%rip), %xmm5
andps %xmm5, %xmm1
addss %xmm2, %xmm1
andnps %xmm3, %xmm4
orps %xmm4, %xmm1
.L68:
comiss .LC4(%rip), %xmm1
jnb .L69
cvttss2siq %xmm1, %rdx
.L70:
movl %edx, 52(%rsp)
movl %eax, 56(%rsp)
movl $1, 60(%rsp)
mulss .LC0(%rip), %xmm0
movaps %xmm0, %xmm1
movss .LC7(%rip), %xmm3
movaps %xmm0, %xmm2
andps %xmm3, %xmm2
movss .LC1(%rip), %xmm4
ucomiss %xmm2, %xmm4
jbe .L71
cvttss2sil %xmm0, %edx
pxor %xmm2, %xmm2
cvtsi2ssl %edx, %xmm2
cmpnless %xmm2, %xmm1
movss .LC3(%rip), %xmm4
andps %xmm4, %xmm1
addss %xmm2, %xmm1
andnps %xmm0, %xmm3
orps %xmm3, %xmm1
.L71:
movaps %xmm1, %xmm0
pxor %xmm1, %xmm1
cvtsi2ssl 192(%rsp), %xmm1
mulss .LC5(%rip), %xmm1
movaps %xmm1, %xmm2
movss .LC7(%rip), %xmm4
movaps %xmm1, %xmm3
andps %xmm4, %xmm3
movss .LC1(%rip), %xmm5
ucomiss %xmm3, %xmm5
jbe .L72
cvttss2sil %xmm1, %edx
pxor %xmm3, %xmm3
cvtsi2ssl %edx, %xmm3
cmpnless %xmm3, %xmm2
movss .LC3(%rip), %xmm5
andps %xmm5, %xmm2
addss %xmm3, %xmm2
andnps %xmm1, %xmm4
orps %xmm4, %xmm2
.L72:
comiss .LC4(%rip), %xmm2
jnb .L73
cvttss2siq %xmm2, %rdx
.L74:
movl %edx, %ecx
movl %edx, 64(%rsp)
comiss .LC4(%rip), %xmm0
jnb .L75
cvttss2siq %xmm0, %rdx
.L76:
movl %edx, 68(%rsp)
movl $1, 72(%rsp)
movl %ecx, 76(%rsp)
movl %eax, 80(%rsp)
movl $1, 84(%rsp)
leaq 24(%rsp), %rdi
call cudaEventCreate@PLT
leaq 32(%rsp), %rdi
call cudaEventCreate@PLT
movl $0, %esi
movq 24(%rsp), %rdi
call cudaEventRecord@PLT
movl 48(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 40(%rsp), %rdx
movq 52(%rsp), %rdi
movl 60(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L82
.L77:
call cudaDeviceSynchronize@PLT
movl 48(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 40(%rsp), %rdx
movq 64(%rsp), %rdi
movl 72(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L83
.L78:
call cudaDeviceSynchronize@PLT
movl 48(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 40(%rsp), %rdx
movq 76(%rsp), %rdi
movl 84(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L84
.L79:
call cudaDeviceSynchronize@PLT
movl $0, %esi
movq 32(%rsp), %rdi
call cudaEventRecord@PLT
movq 32(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 20(%rsp), %rdi
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
call cudaEventElapsedTime@PLT
pxor %xmm0, %xmm0
cvtss2sd 20(%rsp), %xmm0
leaq .LC6(%rip), %rdx
movl $2, %esi
movq stdout(%rip), %rdi
movl $1, %eax
call __fprintf_chk@PLT
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L85
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L66:
.cfi_restore_state
subss .LC4(%rip), %xmm1
cvttss2siq %xmm1, %rax
btcq $63, %rax
jmp .L67
.L69:
subss .LC4(%rip), %xmm1
cvttss2siq %xmm1, %rdx
btcq $63, %rdx
jmp .L70
.L73:
subss .LC4(%rip), %xmm2
cvttss2siq %xmm2, %rdx
btcq $63, %rdx
jmp .L74
.L75:
subss .LC4(%rip), %xmm0
cvttss2siq %xmm0, %rdx
btcq $63, %rdx
jmp .L76
.L82:
movl 200(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 168
movl 200(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 176
movl 200(%rsp), %r9d
movl 192(%rsp), %r8d
movl 184(%rsp), %ecx
movq %rbx, %rdx
movq %r14, %rsi
movq %r13, %rdi
call _Z40__device_stub__Z11mm3_kernel1PfS_S_iiiiiPfS_S_iiiii
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L77
.L83:
movl 200(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 168
movl 200(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 176
movl 200(%rsp), %r9d
movl 192(%rsp), %r8d
movl 184(%rsp), %ecx
movq %rbp, %rdx
movq %r12, %rsi
movq 24(%rsp), %rdi
call _Z40__device_stub__Z11mm3_kernel2PfS_S_iiiiiPfS_S_iiiii
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L78
.L84:
movl 200(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 168
movl 200(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 176
movl 200(%rsp), %r9d
movl 192(%rsp), %r8d
movl 184(%rsp), %ecx
movq %r15, %rdx
movq %rbp, %rsi
movq %rbx, %rdi
call _Z40__device_stub__Z11mm3_kernel3PfS_S_iiiiiPfS_S_iiiii
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L79
.L85:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2071:
.size _Z7mm3CudaPfS_S_S_S_S_S_iiiii, .-_Z7mm3CudaPfS_S_S_S_S_S_iiiii
.section .rodata.str1.1
.LC8:
.string "please no troll\n"
.text
.globl main
.type main, @function
main:
.LFB2072:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $64, %rsp
.cfi_def_cfa_offset 80
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
cmpl $1, %edi
jle .L91
movq %rsi, %rbx
movq 8(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, NI(%rip)
movq 8(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, NJ(%rip)
movq 8(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, NK(%rip)
movq 8(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, NL(%rip)
movq 8(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, NM(%rip)
movl NI(%rip), %esi
imull NK(%rip), %esi
movslq %esi, %rsi
salq $2, %rsi
movq %rsp, %rdi
movl $1, %edx
call cudaMallocManaged@PLT
movl NK(%rip), %esi
imull NJ(%rip), %esi
movslq %esi, %rsi
salq $2, %rsi
leaq 8(%rsp), %rdi
movl $1, %edx
call cudaMallocManaged@PLT
movl NJ(%rip), %esi
imull NM(%rip), %esi
movslq %esi, %rsi
salq $2, %rsi
leaq 16(%rsp), %rdi
movl $1, %edx
call cudaMallocManaged@PLT
movl NM(%rip), %esi
imull NL(%rip), %esi
movslq %esi, %rsi
salq $2, %rsi
leaq 24(%rsp), %rdi
movl $1, %edx
call cudaMallocManaged@PLT
movl NI(%rip), %esi
imull NJ(%rip), %esi
movslq %esi, %rsi
salq $2, %rsi
leaq 32(%rsp), %rdi
movl $1, %edx
call cudaMallocManaged@PLT
movl NJ(%rip), %esi
imull NL(%rip), %esi
movslq %esi, %rsi
salq $2, %rsi
leaq 40(%rsp), %rdi
movl $1, %edx
call cudaMallocManaged@PLT
movl NI(%rip), %esi
imull NL(%rip), %esi
movslq %esi, %rsi
salq $2, %rsi
leaq 48(%rsp), %rdi
movl $1, %edx
call cudaMallocManaged@PLT
movq 24(%rsp), %rcx
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movq (%rsp), %rdi
call _Z10init_arrayPfS_S_S_
movl NM(%rip), %eax
pushq %rax
.cfi_def_cfa_offset 88
movl NL(%rip), %eax
pushq %rax
.cfi_def_cfa_offset 96
movl NK(%rip), %eax
pushq %rax
.cfi_def_cfa_offset 104
movl NJ(%rip), %eax
pushq %rax
.cfi_def_cfa_offset 112
movl NI(%rip), %eax
pushq %rax
.cfi_def_cfa_offset 120
pushq 88(%rsp)
.cfi_def_cfa_offset 128
movq 88(%rsp), %r9
movq 80(%rsp), %r8
movq 72(%rsp), %rcx
movq 64(%rsp), %rdx
movq 56(%rsp), %rsi
movq 48(%rsp), %rdi
call _Z7mm3CudaPfS_S_S_S_S_S_iiiii
addq $48, %rsp
.cfi_def_cfa_offset 80
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 48(%rsp), %rdi
call cudaFree@PLT
movl $0, %eax
.L86:
movq 56(%rsp), %rdx
subq %fs:40, %rdx
jne .L92
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L91:
.cfi_restore_state
leaq .LC8(%rip), %rsi
movl $2, %edi
call __printf_chk@PLT
movl $1, %eax
jmp .L86
.L92:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2072:
.size main, .-main
.section .rodata.str1.1
.LC9:
.string "_Z11mm3_kernel3PfS_S_iiiii"
.LC10:
.string "_Z11mm3_kernel2PfS_S_iiiii"
.LC11:
.string "_Z11mm3_kernel1PfS_S_iiiii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2104:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC9(%rip), %rdx
movq %rdx, %rcx
leaq _Z11mm3_kernel3PfS_S_iiiii(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _Z11mm3_kernel2PfS_S_iiiii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC11(%rip), %rdx
movq %rdx, %rcx
leaq _Z11mm3_kernel1PfS_S_iiiii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2104:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl NM
.bss
.align 4
.type NM, @object
.size NM, 4
NM:
.zero 4
.globl NL
.align 4
.type NL, @object
.size NL, 4
NL:
.zero 4
.globl NK
.align 4
.type NK, @object
.size NK, 4
NK:
.zero 4
.globl NJ
.align 4
.type NJ, @object
.size NJ, 4
NJ:
.zero 4
.globl NI
.align 4
.type NI, @object
.size NI, 4
NI:
.zero 4
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1040187392
.align 4
.LC1:
.long 1258291200
.align 4
.LC3:
.long 1065353216
.align 4
.LC4:
.long 1593835520
.align 4
.LC5:
.long 1023410176
.align 4
.LC7:
.long 2147483647
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /**
* 3mm.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <sgrauerg@gmail.com>
* Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <cuda.h>
#define GPU_DEVICE 0
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
/* Problem size. */
//# define NI 512
//# define NJ 512
//# define NK 512
//# define NL 512
//# define NM 512
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 32
#define DIM_THREAD_BLOCK_Y 8
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
int NI;
int NJ;
int NK;
int NL;
int NM;
void init_array(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D)
{
int i, j;
for (i = 0; i < NI; i++)
{
for (j = 0; j < NK; j++)
{
A[i*NK + j] = ((DATA_TYPE) i*j) / NI;
}
}
for (i = 0; i < NK; i++)
{
for (j = 0; j < NJ; j++)
{
B[i*NJ + j] = ((DATA_TYPE) i*(j+1)) / NJ;
}
}
for (i = 0; i < NJ; i++)
{
for (j = 0; j < NM; j++)
{
C[i*NM + j] = ((DATA_TYPE) i*(j+3)) / NL;
}
}
for (i = 0; i < NM; i++)
{
for (j = 0; j < NL; j++)
{
D[i*NL + j] = ((DATA_TYPE) i*(j+2)) / NK;
}
}
}
__global__ void mm3_kernel1(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *E,int NI, int NJ, int NK, int NL, int NM)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < NI) && (j < NJ))
{
int k;
for(k=0; k < NK; k++)
{
E[i * NJ + j] += A[i * NK + k] * B[k * NJ + j];
}
}
}
__global__ void mm3_kernel2(DATA_TYPE *C, DATA_TYPE *D, DATA_TYPE *F,int NI, int NJ, int NK, int NL, int NM)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < NJ) && (j < NL))
{
int k;
for(k=0; k < NM; k++)
{
F[i * NL + j] += C[i * NM + k] * D[k * NL +j];
}
}
}
__global__ void mm3_kernel3(DATA_TYPE *E, DATA_TYPE *F, DATA_TYPE *G, int NI, int NJ, int NK, int NL, int NM)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < NI) && (j < NL))
{
int k;
for(k=0; k < NJ; k++)
{
G[i * NL + j] += E[i * NJ + k] * F[k * NL + j];
}
}
}
void mm3Cuda(DATA_TYPE* A_gpu, DATA_TYPE* B_gpu, DATA_TYPE* C_gpu, DATA_TYPE* D_gpu, DATA_TYPE* E_gpu, DATA_TYPE* F_gpu,
DATA_TYPE* G_gpu, int NI, int NJ, int NK, int NL, int NM)
{
cudaEvent_t start, end;
float time;
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid1((size_t)(ceil( ((float)NJ) / ((float)DIM_THREAD_BLOCK_X) )),(size_t)(ceil((float)NI/ ((float)DIM_THREAD_BLOCK_Y) )));
dim3 grid2((size_t)(ceil( ((float)NL) / ((float)DIM_THREAD_BLOCK_X) )),(size_t)(ceil((float)NJ/ ((float)DIM_THREAD_BLOCK_Y) )));
dim3 grid3((size_t)(ceil( ((float)NL) / ((float)DIM_THREAD_BLOCK_X) )),(size_t)(ceil((float)NI/ ((float)DIM_THREAD_BLOCK_Y) )));
//for(int i = 0; i < 5; i++){
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start);
mm3_kernel1<<<grid1,block>>>(A_gpu, B_gpu, E_gpu,NI, NJ, NK, NL, NM);
cudaDeviceSynchronize();
mm3_kernel2<<<grid2,block>>>(C_gpu, D_gpu, F_gpu,NI, NJ, NK, NL, NM);
cudaDeviceSynchronize();
mm3_kernel3<<<grid3,block>>>(E_gpu, F_gpu, G_gpu,NI, NJ, NK, NL, NM);
cudaDeviceSynchronize();
cudaEventRecord(end);
cudaEventSynchronize(end);
cudaEventElapsedTime(&time, start, end);
fprintf(stdout, "%0.6lf\n", time);
//}
}
int main(int argc, char** argv)
{
if(argc < 2){
printf("please no troll\n");
return 1;
}
NI = atoi(argv[1]);
NJ = atoi(argv[1]);
NK = atoi(argv[1]);
NL = atoi(argv[1]);
NM = atoi(argv[1]);
DATA_TYPE* A;
DATA_TYPE* B;
DATA_TYPE* C;
DATA_TYPE* D;
DATA_TYPE* E;
DATA_TYPE* F;
DATA_TYPE* G;
cudaMallocManaged(&A, NI*NK*sizeof(DATA_TYPE));
cudaMallocManaged(&B, NK*NJ*sizeof(DATA_TYPE));
cudaMallocManaged(&C, NJ*NM*sizeof(DATA_TYPE));
cudaMallocManaged(&D, NM*NL*sizeof(DATA_TYPE));
cudaMallocManaged(&E, NI*NJ*sizeof(DATA_TYPE));
cudaMallocManaged(&F, NJ*NL*sizeof(DATA_TYPE));
cudaMallocManaged(&G, NI*NL*sizeof(DATA_TYPE));
init_array(A, B, C, D);
mm3Cuda(A, B, C, D, E, F, G, NI, NJ, NK, NL, NM);
cudaFree(A);
cudaFree(B);
cudaFree(C);
cudaFree(D);
cudaFree(E);
cudaFree(F);
cudaFree(G);
return 0;
} | /**
* 3mm.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <sgrauerg@gmail.com>
* Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#define GPU_DEVICE 0
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
/* Problem size. */
//# define NI 512
//# define NJ 512
//# define NK 512
//# define NL 512
//# define NM 512
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 32
#define DIM_THREAD_BLOCK_Y 8
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
int NI;
int NJ;
int NK;
int NL;
int NM;
void init_array(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D)
{
int i, j;
for (i = 0; i < NI; i++)
{
for (j = 0; j < NK; j++)
{
A[i*NK + j] = ((DATA_TYPE) i*j) / NI;
}
}
for (i = 0; i < NK; i++)
{
for (j = 0; j < NJ; j++)
{
B[i*NJ + j] = ((DATA_TYPE) i*(j+1)) / NJ;
}
}
for (i = 0; i < NJ; i++)
{
for (j = 0; j < NM; j++)
{
C[i*NM + j] = ((DATA_TYPE) i*(j+3)) / NL;
}
}
for (i = 0; i < NM; i++)
{
for (j = 0; j < NL; j++)
{
D[i*NL + j] = ((DATA_TYPE) i*(j+2)) / NK;
}
}
}
__global__ void mm3_kernel1(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *E,int NI, int NJ, int NK, int NL, int NM)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < NI) && (j < NJ))
{
int k;
for(k=0; k < NK; k++)
{
E[i * NJ + j] += A[i * NK + k] * B[k * NJ + j];
}
}
}
__global__ void mm3_kernel2(DATA_TYPE *C, DATA_TYPE *D, DATA_TYPE *F,int NI, int NJ, int NK, int NL, int NM)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < NJ) && (j < NL))
{
int k;
for(k=0; k < NM; k++)
{
F[i * NL + j] += C[i * NM + k] * D[k * NL +j];
}
}
}
__global__ void mm3_kernel3(DATA_TYPE *E, DATA_TYPE *F, DATA_TYPE *G, int NI, int NJ, int NK, int NL, int NM)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < NI) && (j < NL))
{
int k;
for(k=0; k < NJ; k++)
{
G[i * NL + j] += E[i * NJ + k] * F[k * NL + j];
}
}
}
void mm3Cuda(DATA_TYPE* A_gpu, DATA_TYPE* B_gpu, DATA_TYPE* C_gpu, DATA_TYPE* D_gpu, DATA_TYPE* E_gpu, DATA_TYPE* F_gpu,
DATA_TYPE* G_gpu, int NI, int NJ, int NK, int NL, int NM)
{
hipEvent_t start, end;
float time;
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid1((size_t)(ceil( ((float)NJ) / ((float)DIM_THREAD_BLOCK_X) )),(size_t)(ceil((float)NI/ ((float)DIM_THREAD_BLOCK_Y) )));
dim3 grid2((size_t)(ceil( ((float)NL) / ((float)DIM_THREAD_BLOCK_X) )),(size_t)(ceil((float)NJ/ ((float)DIM_THREAD_BLOCK_Y) )));
dim3 grid3((size_t)(ceil( ((float)NL) / ((float)DIM_THREAD_BLOCK_X) )),(size_t)(ceil((float)NI/ ((float)DIM_THREAD_BLOCK_Y) )));
//for(int i = 0; i < 5; i++){
hipEventCreate(&start);
hipEventCreate(&end);
hipEventRecord(start);
mm3_kernel1<<<grid1,block>>>(A_gpu, B_gpu, E_gpu,NI, NJ, NK, NL, NM);
hipDeviceSynchronize();
mm3_kernel2<<<grid2,block>>>(C_gpu, D_gpu, F_gpu,NI, NJ, NK, NL, NM);
hipDeviceSynchronize();
mm3_kernel3<<<grid3,block>>>(E_gpu, F_gpu, G_gpu,NI, NJ, NK, NL, NM);
hipDeviceSynchronize();
hipEventRecord(end);
hipEventSynchronize(end);
hipEventElapsedTime(&time, start, end);
fprintf(stdout, "%0.6lf\n", time);
//}
}
int main(int argc, char** argv)
{
if(argc < 2){
printf("please no troll\n");
return 1;
}
NI = atoi(argv[1]);
NJ = atoi(argv[1]);
NK = atoi(argv[1]);
NL = atoi(argv[1]);
NM = atoi(argv[1]);
DATA_TYPE* A;
DATA_TYPE* B;
DATA_TYPE* C;
DATA_TYPE* D;
DATA_TYPE* E;
DATA_TYPE* F;
DATA_TYPE* G;
hipMallocManaged(&A, NI*NK*sizeof(DATA_TYPE));
hipMallocManaged(&B, NK*NJ*sizeof(DATA_TYPE));
hipMallocManaged(&C, NJ*NM*sizeof(DATA_TYPE));
hipMallocManaged(&D, NM*NL*sizeof(DATA_TYPE));
hipMallocManaged(&E, NI*NJ*sizeof(DATA_TYPE));
hipMallocManaged(&F, NJ*NL*sizeof(DATA_TYPE));
hipMallocManaged(&G, NI*NL*sizeof(DATA_TYPE));
init_array(A, B, C, D);
mm3Cuda(A, B, C, D, E, F, G, NI, NJ, NK, NL, NM);
hipFree(A);
hipFree(B);
hipFree(C);
hipFree(D);
hipFree(E);
hipFree(F);
hipFree(G);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /**
* 3mm.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <sgrauerg@gmail.com>
* Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#define GPU_DEVICE 0
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
/* Problem size. */
//# define NI 512
//# define NJ 512
//# define NK 512
//# define NL 512
//# define NM 512
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 32
#define DIM_THREAD_BLOCK_Y 8
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
int NI;
int NJ;
int NK;
int NL;
int NM;
void init_array(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D)
{
int i, j;
for (i = 0; i < NI; i++)
{
for (j = 0; j < NK; j++)
{
A[i*NK + j] = ((DATA_TYPE) i*j) / NI;
}
}
for (i = 0; i < NK; i++)
{
for (j = 0; j < NJ; j++)
{
B[i*NJ + j] = ((DATA_TYPE) i*(j+1)) / NJ;
}
}
for (i = 0; i < NJ; i++)
{
for (j = 0; j < NM; j++)
{
C[i*NM + j] = ((DATA_TYPE) i*(j+3)) / NL;
}
}
for (i = 0; i < NM; i++)
{
for (j = 0; j < NL; j++)
{
D[i*NL + j] = ((DATA_TYPE) i*(j+2)) / NK;
}
}
}
__global__ void mm3_kernel1(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *E,int NI, int NJ, int NK, int NL, int NM)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < NI) && (j < NJ))
{
int k;
for(k=0; k < NK; k++)
{
E[i * NJ + j] += A[i * NK + k] * B[k * NJ + j];
}
}
}
__global__ void mm3_kernel2(DATA_TYPE *C, DATA_TYPE *D, DATA_TYPE *F,int NI, int NJ, int NK, int NL, int NM)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < NJ) && (j < NL))
{
int k;
for(k=0; k < NM; k++)
{
F[i * NL + j] += C[i * NM + k] * D[k * NL +j];
}
}
}
__global__ void mm3_kernel3(DATA_TYPE *E, DATA_TYPE *F, DATA_TYPE *G, int NI, int NJ, int NK, int NL, int NM)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < NI) && (j < NL))
{
int k;
for(k=0; k < NJ; k++)
{
G[i * NL + j] += E[i * NJ + k] * F[k * NL + j];
}
}
}
void mm3Cuda(DATA_TYPE* A_gpu, DATA_TYPE* B_gpu, DATA_TYPE* C_gpu, DATA_TYPE* D_gpu, DATA_TYPE* E_gpu, DATA_TYPE* F_gpu,
DATA_TYPE* G_gpu, int NI, int NJ, int NK, int NL, int NM)
{
hipEvent_t start, end;
float time;
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid1((size_t)(ceil( ((float)NJ) / ((float)DIM_THREAD_BLOCK_X) )),(size_t)(ceil((float)NI/ ((float)DIM_THREAD_BLOCK_Y) )));
dim3 grid2((size_t)(ceil( ((float)NL) / ((float)DIM_THREAD_BLOCK_X) )),(size_t)(ceil((float)NJ/ ((float)DIM_THREAD_BLOCK_Y) )));
dim3 grid3((size_t)(ceil( ((float)NL) / ((float)DIM_THREAD_BLOCK_X) )),(size_t)(ceil((float)NI/ ((float)DIM_THREAD_BLOCK_Y) )));
//for(int i = 0; i < 5; i++){
hipEventCreate(&start);
hipEventCreate(&end);
hipEventRecord(start);
mm3_kernel1<<<grid1,block>>>(A_gpu, B_gpu, E_gpu,NI, NJ, NK, NL, NM);
hipDeviceSynchronize();
mm3_kernel2<<<grid2,block>>>(C_gpu, D_gpu, F_gpu,NI, NJ, NK, NL, NM);
hipDeviceSynchronize();
mm3_kernel3<<<grid3,block>>>(E_gpu, F_gpu, G_gpu,NI, NJ, NK, NL, NM);
hipDeviceSynchronize();
hipEventRecord(end);
hipEventSynchronize(end);
hipEventElapsedTime(&time, start, end);
fprintf(stdout, "%0.6lf\n", time);
//}
}
int main(int argc, char** argv)
{
if(argc < 2){
printf("please no troll\n");
return 1;
}
NI = atoi(argv[1]);
NJ = atoi(argv[1]);
NK = atoi(argv[1]);
NL = atoi(argv[1]);
NM = atoi(argv[1]);
DATA_TYPE* A;
DATA_TYPE* B;
DATA_TYPE* C;
DATA_TYPE* D;
DATA_TYPE* E;
DATA_TYPE* F;
DATA_TYPE* G;
hipMallocManaged(&A, NI*NK*sizeof(DATA_TYPE));
hipMallocManaged(&B, NK*NJ*sizeof(DATA_TYPE));
hipMallocManaged(&C, NJ*NM*sizeof(DATA_TYPE));
hipMallocManaged(&D, NM*NL*sizeof(DATA_TYPE));
hipMallocManaged(&E, NI*NJ*sizeof(DATA_TYPE));
hipMallocManaged(&F, NJ*NL*sizeof(DATA_TYPE));
hipMallocManaged(&G, NI*NL*sizeof(DATA_TYPE));
init_array(A, B, C, D);
mm3Cuda(A, B, C, D, E, F, G, NI, NJ, NK, NL, NM);
hipFree(A);
hipFree(B);
hipFree(C);
hipFree(D);
hipFree(E);
hipFree(F);
hipFree(G);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z11mm3_kernel1PfS_S_iiiii
.globl _Z11mm3_kernel1PfS_S_iiiii
.p2align 8
.type _Z11mm3_kernel1PfS_S_iiiii,@function
_Z11mm3_kernel1PfS_S_iiiii:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x3c
s_load_b64 s[4:5], s[0:1], 0x18
s_load_b32 s6, s[0:1], 0x20
v_bfe_u32 v1, v0, 10, 10
v_and_b32_e32 v2, 0x3ff, v0
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s2, 0xffff
s_lshr_b32 s2, s2, 16
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_mad_u64_u32 v[4:5], null, s15, s2, v[1:2]
v_mad_u64_u32 v[0:1], null, s14, s3, v[2:3]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_gt_i32_e32 vcc_lo, s4, v4
v_cmp_gt_i32_e64 s2, s5, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
s_and_b32 s2, s2, vcc_lo
s_cmp_gt_i32 s6, 0
s_cselect_b32 s3, -1, 0
s_and_b32 s2, s2, s3
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_3
s_load_b64 s[2:3], s[0:1], 0x10
v_mad_u64_u32 v[1:2], null, v4, s5, v[0:1]
v_mul_lo_u32 v4, v4, s6
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v2, 31, v1
v_ashrrev_i32_e32 v5, 31, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[2:3], 2, v[1:2]
v_lshlrev_b64 v[4:5], 2, v[4:5]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v2, vcc_lo, s2, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo
s_load_b128 s[0:3], s[0:1], 0x0
global_load_b32 v6, v[2:3], off
s_waitcnt lgkmcnt(0)
v_add_co_u32 v4, vcc_lo, s0, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v5, vcc_lo
.p2align 6
.LBB0_2:
v_ashrrev_i32_e32 v1, 31, v0
s_add_i32 s6, s6, -1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_cmp_lg_u32 s6, 0
v_lshlrev_b64 v[7:8], 2, v[0:1]
v_add_nc_u32_e32 v0, s5, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v7, vcc_lo, s2, v7
v_add_co_ci_u32_e32 v8, vcc_lo, s3, v8, vcc_lo
global_load_b32 v1, v[4:5], off
global_load_b32 v7, v[7:8], off
v_add_co_u32 v4, vcc_lo, v4, 4
v_add_co_ci_u32_e32 v5, vcc_lo, 0, v5, vcc_lo
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v6, v1, v7
global_store_b32 v[2:3], v6, off
s_cbranch_scc1 .LBB0_2
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z11mm3_kernel1PfS_S_iiiii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 304
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z11mm3_kernel1PfS_S_iiiii, .Lfunc_end0-_Z11mm3_kernel1PfS_S_iiiii
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z11mm3_kernel2PfS_S_iiiii
.globl _Z11mm3_kernel2PfS_S_iiiii
.p2align 8
.type _Z11mm3_kernel2PfS_S_iiiii,@function
_Z11mm3_kernel2PfS_S_iiiii:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x3c
s_load_b32 s3, s[0:1], 0x1c
s_load_b64 s[4:5], s[0:1], 0x24
v_bfe_u32 v1, v0, 10, 10
v_and_b32_e32 v2, 0x3ff, v0
s_waitcnt lgkmcnt(0)
s_and_b32 s6, s2, 0xffff
s_lshr_b32 s2, s2, 16
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_mad_u64_u32 v[4:5], null, s15, s2, v[1:2]
v_mad_u64_u32 v[0:1], null, s14, s6, v[2:3]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_gt_i32_e32 vcc_lo, s3, v4
v_cmp_gt_i32_e64 s2, s4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
s_and_b32 s2, s2, vcc_lo
s_cmp_gt_i32 s5, 0
s_cselect_b32 s3, -1, 0
s_and_b32 s2, s2, s3
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB1_3
s_load_b64 s[2:3], s[0:1], 0x10
v_mad_u64_u32 v[1:2], null, v4, s4, v[0:1]
v_mul_lo_u32 v4, v4, s5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v2, 31, v1
v_ashrrev_i32_e32 v5, 31, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[2:3], 2, v[1:2]
v_lshlrev_b64 v[4:5], 2, v[4:5]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v2, vcc_lo, s2, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo
s_load_b128 s[0:3], s[0:1], 0x0
global_load_b32 v6, v[2:3], off
s_waitcnt lgkmcnt(0)
v_add_co_u32 v4, vcc_lo, s0, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v5, vcc_lo
.p2align 6
.LBB1_2:
v_ashrrev_i32_e32 v1, 31, v0
s_add_i32 s5, s5, -1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_cmp_lg_u32 s5, 0
v_lshlrev_b64 v[7:8], 2, v[0:1]
v_add_nc_u32_e32 v0, s4, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v7, vcc_lo, s2, v7
v_add_co_ci_u32_e32 v8, vcc_lo, s3, v8, vcc_lo
global_load_b32 v1, v[4:5], off
global_load_b32 v7, v[7:8], off
v_add_co_u32 v4, vcc_lo, v4, 4
v_add_co_ci_u32_e32 v5, vcc_lo, 0, v5, vcc_lo
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v6, v1, v7
global_store_b32 v[2:3], v6, off
s_cbranch_scc1 .LBB1_2
.LBB1_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z11mm3_kernel2PfS_S_iiiii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 304
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z11mm3_kernel2PfS_S_iiiii, .Lfunc_end1-_Z11mm3_kernel2PfS_S_iiiii
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z11mm3_kernel3PfS_S_iiiii
.globl _Z11mm3_kernel3PfS_S_iiiii
.p2align 8
.type _Z11mm3_kernel3PfS_S_iiiii,@function
_Z11mm3_kernel3PfS_S_iiiii:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x3c
s_load_b64 s[4:5], s[0:1], 0x18
s_load_b32 s6, s[0:1], 0x24
v_bfe_u32 v1, v0, 10, 10
v_and_b32_e32 v2, 0x3ff, v0
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s2, 0xffff
s_lshr_b32 s2, s2, 16
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_mad_u64_u32 v[4:5], null, s15, s2, v[1:2]
v_mad_u64_u32 v[0:1], null, s14, s3, v[2:3]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_gt_i32_e32 vcc_lo, s4, v4
v_cmp_gt_i32_e64 s2, s6, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
s_and_b32 s2, s2, vcc_lo
s_cmp_gt_i32 s5, 0
s_cselect_b32 s3, -1, 0
s_and_b32 s2, s2, s3
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB2_3
s_load_b64 s[2:3], s[0:1], 0x10
v_mad_u64_u32 v[1:2], null, v4, s6, v[0:1]
v_mul_lo_u32 v4, v4, s5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v2, 31, v1
v_ashrrev_i32_e32 v5, 31, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[2:3], 2, v[1:2]
v_lshlrev_b64 v[4:5], 2, v[4:5]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v2, vcc_lo, s2, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo
s_load_b128 s[0:3], s[0:1], 0x0
global_load_b32 v6, v[2:3], off
s_waitcnt lgkmcnt(0)
v_add_co_u32 v4, vcc_lo, s0, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v5, vcc_lo
.p2align 6
.LBB2_2:
v_ashrrev_i32_e32 v1, 31, v0
s_add_i32 s5, s5, -1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_cmp_lg_u32 s5, 0
v_lshlrev_b64 v[7:8], 2, v[0:1]
v_add_nc_u32_e32 v0, s6, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v7, vcc_lo, s2, v7
v_add_co_ci_u32_e32 v8, vcc_lo, s3, v8, vcc_lo
global_load_b32 v1, v[4:5], off
global_load_b32 v7, v[7:8], off
v_add_co_u32 v4, vcc_lo, v4, 4
v_add_co_ci_u32_e32 v5, vcc_lo, 0, v5, vcc_lo
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v6, v1, v7
global_store_b32 v[2:3], v6, off
s_cbranch_scc1 .LBB2_2
.LBB2_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z11mm3_kernel3PfS_S_iiiii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 304
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z11mm3_kernel3PfS_S_iiiii, .Lfunc_end2-_Z11mm3_kernel3PfS_S_iiiii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 36
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: by_value
- .offset: 48
.size: 4
.value_kind: hidden_block_count_x
- .offset: 52
.size: 4
.value_kind: hidden_block_count_y
- .offset: 56
.size: 4
.value_kind: hidden_block_count_z
- .offset: 60
.size: 2
.value_kind: hidden_group_size_x
- .offset: 62
.size: 2
.value_kind: hidden_group_size_y
- .offset: 64
.size: 2
.value_kind: hidden_group_size_z
- .offset: 66
.size: 2
.value_kind: hidden_remainder_x
- .offset: 68
.size: 2
.value_kind: hidden_remainder_y
- .offset: 70
.size: 2
.value_kind: hidden_remainder_z
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 112
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 304
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z11mm3_kernel1PfS_S_iiiii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z11mm3_kernel1PfS_S_iiiii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 36
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: by_value
- .offset: 48
.size: 4
.value_kind: hidden_block_count_x
- .offset: 52
.size: 4
.value_kind: hidden_block_count_y
- .offset: 56
.size: 4
.value_kind: hidden_block_count_z
- .offset: 60
.size: 2
.value_kind: hidden_group_size_x
- .offset: 62
.size: 2
.value_kind: hidden_group_size_y
- .offset: 64
.size: 2
.value_kind: hidden_group_size_z
- .offset: 66
.size: 2
.value_kind: hidden_remainder_x
- .offset: 68
.size: 2
.value_kind: hidden_remainder_y
- .offset: 70
.size: 2
.value_kind: hidden_remainder_z
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 112
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 304
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z11mm3_kernel2PfS_S_iiiii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z11mm3_kernel2PfS_S_iiiii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 36
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: by_value
- .offset: 48
.size: 4
.value_kind: hidden_block_count_x
- .offset: 52
.size: 4
.value_kind: hidden_block_count_y
- .offset: 56
.size: 4
.value_kind: hidden_block_count_z
- .offset: 60
.size: 2
.value_kind: hidden_group_size_x
- .offset: 62
.size: 2
.value_kind: hidden_group_size_y
- .offset: 64
.size: 2
.value_kind: hidden_group_size_z
- .offset: 66
.size: 2
.value_kind: hidden_remainder_x
- .offset: 68
.size: 2
.value_kind: hidden_remainder_y
- .offset: 70
.size: 2
.value_kind: hidden_remainder_z
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 112
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 304
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z11mm3_kernel3PfS_S_iiiii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z11mm3_kernel3PfS_S_iiiii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /**
* 3mm.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <sgrauerg@gmail.com>
* Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#define GPU_DEVICE 0
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
/* Problem size. */
//# define NI 512
//# define NJ 512
//# define NK 512
//# define NL 512
//# define NM 512
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 32
#define DIM_THREAD_BLOCK_Y 8
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
int NI;
int NJ;
int NK;
int NL;
int NM;
/* Fill the four input matrices with the deterministic PolyBench pattern.
 * Matrix shapes come from the file-level globals, which must be set first:
 * A is NI x NK, B is NK x NJ, C is NJ x NM, D is NM x NL.
 * Each element is a small ratio of its row/column indices so results are
 * reproducible across runs. */
void init_array(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D)
{
	int r, c;
	/* A[r][c] = r*c / NI */
	for (r = 0; r < NI; r++)
	{
		for (c = 0; c < NK; c++)
		{
			A[r*NK + c] = ((DATA_TYPE) r*c) / NI;
		}
	}
	/* B[r][c] = r*(c+1) / NJ */
	for (r = 0; r < NK; r++)
	{
		for (c = 0; c < NJ; c++)
		{
			B[r*NJ + c] = ((DATA_TYPE) r*(c+1)) / NJ;
		}
	}
	/* C[r][c] = r*(c+3) / NL */
	for (r = 0; r < NJ; r++)
	{
		for (c = 0; c < NM; c++)
		{
			C[r*NM + c] = ((DATA_TYPE) r*(c+3)) / NL;
		}
	}
	/* D[r][c] = r*(c+2) / NK */
	for (r = 0; r < NM; r++)
	{
		for (c = 0; c < NL; c++)
		{
			D[r*NL + c] = ((DATA_TYPE) r*(c+2)) / NK;
		}
	}
}
/* E := A * B (E is NI x NJ, A is NI x NK, B is NK x NJ).
 * One output element per thread; grid x covers columns (NJ), grid y covers
 * rows (NI). Threads outside the matrix bounds exit immediately. */
__global__ void mm3_kernel1(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *E,int NI, int NJ, int NK, int NL, int NM)
{
	int j = blockIdx.x * blockDim.x + threadIdx.x;
	int i = blockIdx.y * blockDim.y + threadIdx.y;
	if ((i < NI) && (j < NJ))
	{
		/* Accumulate in a register instead of doing a global
		 * read-modify-write of E[i*NJ+j] every iteration. Seeding the
		 * accumulator with the existing E value keeps the summation
		 * order (and therefore the float rounding) identical. */
		DATA_TYPE sum = E[i * NJ + j];
		int k;
		for(k=0; k < NK; k++)
		{
			sum += A[i * NK + k] * B[k * NJ + j];
		}
		E[i * NJ + j] = sum;
	}
}
/* F := C * D (F is NJ x NL, C is NJ x NM, D is NM x NL).
 * One output element per thread; grid x covers columns (NL), grid y covers
 * rows (NJ). Threads outside the matrix bounds exit immediately. */
__global__ void mm3_kernel2(DATA_TYPE *C, DATA_TYPE *D, DATA_TYPE *F,int NI, int NJ, int NK, int NL, int NM)
{
	int j = blockIdx.x * blockDim.x + threadIdx.x;
	int i = blockIdx.y * blockDim.y + threadIdx.y;
	if ((i < NJ) && (j < NL))
	{
		/* Register accumulator replaces the per-iteration global
		 * read-modify-write of F[i*NL+j]; starting from the existing
		 * value preserves the exact summation order and rounding. */
		DATA_TYPE sum = F[i * NL + j];
		int k;
		for(k=0; k < NM; k++)
		{
			sum += C[i * NM + k] * D[k * NL +j];
		}
		F[i * NL + j] = sum;
	}
}
/* G := E * F (G is NI x NL, E is NI x NJ, F is NJ x NL) — the final product
 * of the 3mm chain. One output element per thread; grid x covers columns
 * (NL), grid y covers rows (NI). Out-of-range threads exit immediately. */
__global__ void mm3_kernel3(DATA_TYPE *E, DATA_TYPE *F, DATA_TYPE *G, int NI, int NJ, int NK, int NL, int NM)
{
	int j = blockIdx.x * blockDim.x + threadIdx.x;
	int i = blockIdx.y * blockDim.y + threadIdx.y;
	if ((i < NI) && (j < NL))
	{
		/* Register accumulator replaces the per-iteration global
		 * read-modify-write of G[i*NL+j]; starting from the existing
		 * value preserves the exact summation order and rounding. */
		DATA_TYPE sum = G[i * NL + j];
		int k;
		for(k=0; k < NJ; k++)
		{
			sum += E[i * NJ + k] * F[k * NL + j];
		}
		G[i * NL + j] = sum;
	}
}
/* Run the three-matrix-multiply chain on the GPU:
 *   E = A*B, F = C*D, G = E*F
 * and print the elapsed wall time (ms, measured with HIP events) to stdout.
 * The integer parameters shadow the file-level globals of the same names;
 * all pointers are expected to be device-accessible (hipMallocManaged).
 * NOTE(review): HIP API return codes are not checked here — a failed launch
 * would go unnoticed until the timing output looks wrong. */
void mm3Cuda(DATA_TYPE* A_gpu, DATA_TYPE* B_gpu, DATA_TYPE* C_gpu, DATA_TYPE* D_gpu, DATA_TYPE* E_gpu, DATA_TYPE* F_gpu,
DATA_TYPE* G_gpu, int NI, int NJ, int NK, int NL, int NM)
{
	hipEvent_t start, end;
	float time;
	dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
	/* Ceil-divide each output matrix's dimensions by the block shape so the
	 * grid covers the whole matrix even when it is not an exact multiple. */
	dim3 grid1((size_t)(ceil( ((float)NJ) / ((float)DIM_THREAD_BLOCK_X) )),(size_t)(ceil((float)NI/ ((float)DIM_THREAD_BLOCK_Y) )));
	dim3 grid2((size_t)(ceil( ((float)NL) / ((float)DIM_THREAD_BLOCK_X) )),(size_t)(ceil((float)NJ/ ((float)DIM_THREAD_BLOCK_Y) )));
	dim3 grid3((size_t)(ceil( ((float)NL) / ((float)DIM_THREAD_BLOCK_X) )),(size_t)(ceil((float)NI/ ((float)DIM_THREAD_BLOCK_Y) )));
	hipEventCreate(&start);
	hipEventCreate(&end);
	hipEventRecord(start);
	/* Each kernel depends on the previous one's output, hence the explicit
	 * synchronization between launches. */
	mm3_kernel1<<<grid1,block>>>(A_gpu, B_gpu, E_gpu,NI, NJ, NK, NL, NM);
	hipDeviceSynchronize();
	mm3_kernel2<<<grid2,block>>>(C_gpu, D_gpu, F_gpu,NI, NJ, NK, NL, NM);
	hipDeviceSynchronize();
	mm3_kernel3<<<grid3,block>>>(E_gpu, F_gpu, G_gpu,NI, NJ, NK, NL, NM);
	hipDeviceSynchronize();
	hipEventRecord(end);
	hipEventSynchronize(end);
	hipEventElapsedTime(&time, start, end);
	fprintf(stdout, "%0.6lf\n", time);
	/* Events were leaked in the original; release them once the elapsed
	 * time has been read. */
	hipEventDestroy(start);
	hipEventDestroy(end);
}
/* Entry point: reads one problem size n from argv[1], uses it for all five
 * matrix dimensions, allocates the seven matrices in HIP managed memory,
 * initializes the inputs, runs the timed 3mm chain, and frees everything.
 * Returns 1 (after a usage message) when no size argument is given. */
int main(int argc, char** argv)
{
	if(argc < 2){
		printf("please no troll\n");
		return 1;
	}
	NI = atoi(argv[1]);
	NJ = atoi(argv[1]);
	NK = atoi(argv[1]);
	NL = atoi(argv[1]);
	NM = atoi(argv[1]);
	DATA_TYPE* A;
	DATA_TYPE* B;
	DATA_TYPE* C;
	DATA_TYPE* D;
	DATA_TYPE* E;
	DATA_TYPE* F;
	DATA_TYPE* G;
	/* Widen to size_t BEFORE multiplying: the original int*int product
	 * (e.g. NI*NK) overflows for n > 46340 and would request a bogus
	 * allocation size. */
	hipMallocManaged(&A, (size_t)NI * NK * sizeof(DATA_TYPE));
	hipMallocManaged(&B, (size_t)NK * NJ * sizeof(DATA_TYPE));
	hipMallocManaged(&C, (size_t)NJ * NM * sizeof(DATA_TYPE));
	hipMallocManaged(&D, (size_t)NM * NL * sizeof(DATA_TYPE));
	hipMallocManaged(&E, (size_t)NI * NJ * sizeof(DATA_TYPE));
	hipMallocManaged(&F, (size_t)NJ * NL * sizeof(DATA_TYPE));
	hipMallocManaged(&G, (size_t)NI * NL * sizeof(DATA_TYPE));
	init_array(A, B, C, D);
	mm3Cuda(A, B, C, D, E, F, G, NI, NJ, NK, NL, NM);
	hipFree(A);
	hipFree(B);
	hipFree(C);
	hipFree(D);
	hipFree(E);
	hipFree(F);
	hipFree(G);
	return 0;
}
.file "3mm_managed.hip"
.globl _Z10init_arrayPfS_S_S_ # -- Begin function _Z10init_arrayPfS_S_S_
.p2align 4, 0x90
.type _Z10init_arrayPfS_S_S_,@function
_Z10init_arrayPfS_S_S_: # @_Z10init_arrayPfS_S_S_
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset %rbx, -16
movl NI(%rip), %eax
testl %eax, %eax
jle .LBB0_6
# %bb.1: # %.preheader48.lr.ph
cvtsi2ss %eax, %xmm0
movl NK(%rip), %r8d
xorl %r9d, %r9d
xorl %r10d, %r10d
jmp .LBB0_2
.p2align 4, 0x90
.LBB0_5: # %._crit_edge
# in Loop: Header=BB0_2 Depth=1
incq %r10
addl %r8d, %r9d
cmpq %rax, %r10
je .LBB0_6
.LBB0_2: # %.preheader48
# =>This Loop Header: Depth=1
# Child Loop BB0_4 Depth 2
testl %r8d, %r8d
jle .LBB0_5
# %bb.3: # %.lr.ph
# in Loop: Header=BB0_2 Depth=1
movl %r9d, %r11d
leaq (%rdi,%r11,4), %r11
xorps %xmm1, %xmm1
cvtsi2ss %r10d, %xmm1
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB0_4: # Parent Loop BB0_2 Depth=1
# => This Inner Loop Header: Depth=2
xorps %xmm2, %xmm2
cvtsi2ss %ebx, %xmm2
mulss %xmm1, %xmm2
divss %xmm0, %xmm2
movss %xmm2, (%r11,%rbx,4)
incq %rbx
cmpq %rbx, %r8
jne .LBB0_4
jmp .LBB0_5
.LBB0_6: # %.preheader47
movl NK(%rip), %eax
testl %eax, %eax
jle .LBB0_12
# %bb.7: # %.preheader46.lr.ph
movl NJ(%rip), %edi
xorps %xmm0, %xmm0
cvtsi2ss %edi, %xmm0
xorl %r8d, %r8d
xorl %r9d, %r9d
jmp .LBB0_8
.p2align 4, 0x90
.LBB0_11: # %._crit_edge53
# in Loop: Header=BB0_8 Depth=1
incq %r9
addl %edi, %r8d
cmpq %rax, %r9
je .LBB0_12
.LBB0_8: # %.preheader46
# =>This Loop Header: Depth=1
# Child Loop BB0_10 Depth 2
testl %edi, %edi
jle .LBB0_11
# %bb.9: # %.lr.ph52
# in Loop: Header=BB0_8 Depth=1
movl %r8d, %r10d
leaq (%rsi,%r10,4), %r10
xorps %xmm1, %xmm1
cvtsi2ss %r9d, %xmm1
xorl %r11d, %r11d
.p2align 4, 0x90
.LBB0_10: # Parent Loop BB0_8 Depth=1
# => This Inner Loop Header: Depth=2
leaq 1(%r11), %rbx
xorps %xmm2, %xmm2
cvtsi2ss %ebx, %xmm2
mulss %xmm1, %xmm2
divss %xmm0, %xmm2
movss %xmm2, (%r10,%r11,4)
movq %rbx, %r11
cmpq %rbx, %rdi
jne .LBB0_10
jmp .LBB0_11
.LBB0_12: # %.preheader45
movl NJ(%rip), %esi
testl %esi, %esi
jle .LBB0_18
# %bb.13: # %.preheader44.lr.ph
xorps %xmm0, %xmm0
cvtsi2ssl NL(%rip), %xmm0
movl NM(%rip), %edi
xorl %r8d, %r8d
xorl %r9d, %r9d
jmp .LBB0_14
.p2align 4, 0x90
.LBB0_17: # %._crit_edge57
# in Loop: Header=BB0_14 Depth=1
incq %r9
addl %edi, %r8d
cmpq %rsi, %r9
je .LBB0_18
.LBB0_14: # %.preheader44
# =>This Loop Header: Depth=1
# Child Loop BB0_16 Depth 2
testl %edi, %edi
jle .LBB0_17
# %bb.15: # %.lr.ph56
# in Loop: Header=BB0_14 Depth=1
movl %r8d, %r10d
leaq (%rdx,%r10,4), %r10
xorps %xmm1, %xmm1
cvtsi2ss %r9d, %xmm1
xorl %r11d, %r11d
.p2align 4, 0x90
.LBB0_16: # Parent Loop BB0_14 Depth=1
# => This Inner Loop Header: Depth=2
leal 3(%r11), %ebx
xorps %xmm2, %xmm2
cvtsi2ss %ebx, %xmm2
mulss %xmm1, %xmm2
divss %xmm0, %xmm2
movss %xmm2, (%r10,%r11,4)
incq %r11
cmpq %r11, %rdi
jne .LBB0_16
jmp .LBB0_17
.LBB0_18: # %.preheader43
movl NM(%rip), %edx
testl %edx, %edx
jle .LBB0_24
# %bb.19: # %.preheader.lr.ph
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movl NL(%rip), %eax
xorl %esi, %esi
xorl %edi, %edi
jmp .LBB0_20
.p2align 4, 0x90
.LBB0_23: # %._crit_edge61
# in Loop: Header=BB0_20 Depth=1
incq %rdi
addl %eax, %esi
cmpq %rdx, %rdi
je .LBB0_24
.LBB0_20: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB0_22 Depth 2
testl %eax, %eax
jle .LBB0_23
# %bb.21: # %.lr.ph60
# in Loop: Header=BB0_20 Depth=1
movl %esi, %r8d
leaq (%rcx,%r8,4), %r8
xorps %xmm1, %xmm1
cvtsi2ss %edi, %xmm1
xorl %r9d, %r9d
.p2align 4, 0x90
.LBB0_22: # Parent Loop BB0_20 Depth=1
# => This Inner Loop Header: Depth=2
leal 2(%r9), %r10d
xorps %xmm2, %xmm2
cvtsi2ss %r10d, %xmm2
mulss %xmm1, %xmm2
divss %xmm0, %xmm2
movss %xmm2, (%r8,%r9,4)
incq %r9
cmpq %r9, %rax
jne .LBB0_22
jmp .LBB0_23
.LBB0_24: # %._crit_edge63
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z10init_arrayPfS_S_S_, .Lfunc_end0-_Z10init_arrayPfS_S_S_
.cfi_endproc
# -- End function
.globl _Z26__device_stub__mm3_kernel1PfS_S_iiiii # -- Begin function _Z26__device_stub__mm3_kernel1PfS_S_iiiii
.p2align 4, 0x90
.type _Z26__device_stub__mm3_kernel1PfS_S_iiiii,@function
_Z26__device_stub__mm3_kernel1PfS_S_iiiii: # @_Z26__device_stub__mm3_kernel1PfS_S_iiiii
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 20(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 176(%rsp), %rax
movq %rax, 144(%rsp)
leaq 184(%rsp), %rax
movq %rax, 152(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z11mm3_kernel1PfS_S_iiiii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end1:
.size _Z26__device_stub__mm3_kernel1PfS_S_iiiii, .Lfunc_end1-_Z26__device_stub__mm3_kernel1PfS_S_iiiii
.cfi_endproc
# -- End function
.globl _Z26__device_stub__mm3_kernel2PfS_S_iiiii # -- Begin function _Z26__device_stub__mm3_kernel2PfS_S_iiiii
.p2align 4, 0x90
.type _Z26__device_stub__mm3_kernel2PfS_S_iiiii,@function
_Z26__device_stub__mm3_kernel2PfS_S_iiiii: # @_Z26__device_stub__mm3_kernel2PfS_S_iiiii
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 20(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 176(%rsp), %rax
movq %rax, 144(%rsp)
leaq 184(%rsp), %rax
movq %rax, 152(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z11mm3_kernel2PfS_S_iiiii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end2:
.size _Z26__device_stub__mm3_kernel2PfS_S_iiiii, .Lfunc_end2-_Z26__device_stub__mm3_kernel2PfS_S_iiiii
.cfi_endproc
# -- End function
.globl _Z26__device_stub__mm3_kernel3PfS_S_iiiii # -- Begin function _Z26__device_stub__mm3_kernel3PfS_S_iiiii
.p2align 4, 0x90
.type _Z26__device_stub__mm3_kernel3PfS_S_iiiii,@function
_Z26__device_stub__mm3_kernel3PfS_S_iiiii: # @_Z26__device_stub__mm3_kernel3PfS_S_iiiii
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 20(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 176(%rsp), %rax
movq %rax, 144(%rsp)
leaq 184(%rsp), %rax
movq %rax, 152(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z11mm3_kernel3PfS_S_iiiii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end3:
.size _Z26__device_stub__mm3_kernel3PfS_S_iiiii, .Lfunc_end3-_Z26__device_stub__mm3_kernel3PfS_S_iiiii
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z7mm3CudaPfS_S_S_S_S_S_iiiii
.LCPI4_0:
.long 0x3d000000 # float 0.03125
.LCPI4_1:
.long 0x5f000000 # float 9.22337203E+18
.LCPI4_2:
.long 0x3e000000 # float 0.125
.text
.globl _Z7mm3CudaPfS_S_S_S_S_S_iiiii
.p2align 4, 0x90
.type _Z7mm3CudaPfS_S_S_S_S_S_iiiii,@function
_Z7mm3CudaPfS_S_S_S_S_S_iiiii: # @_Z7mm3CudaPfS_S_S_S_S_S_iiiii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $232, %rsp
.cfi_def_cfa_offset 288
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %r9, 192(%rsp) # 8-byte Spill
movq %r8, 184(%rsp) # 8-byte Spill
movq %rcx, 224(%rsp) # 8-byte Spill
movq %rdx, 216(%rsp) # 8-byte Spill
movq %rsi, 208(%rsp) # 8-byte Spill
movq %rdi, %r14
cvtsi2ssl 304(%rsp), %xmm0
movss %xmm0, 108(%rsp) # 4-byte Spill
movabsq $34359738400, %rbx # imm = 0x800000020
mulss .LCPI4_0(%rip), %xmm0
callq ceilf@PLT
cvttss2si %xmm0, %r13
movq %r13, %rax
subss .LCPI4_1(%rip), %xmm0
cvttss2si %xmm0, %rcx
sarq $63, %rax
xorps %xmm0, %xmm0
cvtsi2ssl 296(%rsp), %xmm0
andl %eax, %ecx
orl %ecx, %r13d
mulss .LCPI4_2(%rip), %xmm0
callq ceilf@PLT
cvttss2si %xmm0, %r15
movq %r15, %rax
sarq $63, %rax
subss .LCPI4_1(%rip), %xmm0
cvttss2si %xmm0, %rcx
andl %eax, %ecx
orl %ecx, %r15d
shlq $32, %r15
orq %r15, %r13
xorps %xmm0, %xmm0
cvtsi2ssl 320(%rsp), %xmm0
mulss .LCPI4_0(%rip), %xmm0
callq ceilf@PLT
cvttss2si %xmm0, %rbp
movq %rbp, %rax
subss .LCPI4_1(%rip), %xmm0
cvttss2si %xmm0, %rcx
sarq $63, %rax
andl %eax, %ecx
orl %ecx, %ebp
movss 108(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
mulss .LCPI4_2(%rip), %xmm0
callq ceilf@PLT
cvttss2si %xmm0, %r12
movq %r12, %rax
sarq $63, %rax
subss .LCPI4_1(%rip), %xmm0
cvttss2si %xmm0, %rcx
andl %eax, %ecx
orl %ecx, %r12d
shlq $32, %r12
orq %rbp, %r12
leaq 200(%rsp), %rdi
callq hipEventCreate
leaq 96(%rsp), %rdi
callq hipEventCreate
movq 200(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq %r13, %rdi
movl 328(%rsp), %r13d
movl $1, %esi
movq %rbx, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_2
# %bb.1:
movq %r14, 88(%rsp)
movq 208(%rsp), %rax # 8-byte Reload
movq %rax, 80(%rsp)
movq 184(%rsp), %rax # 8-byte Reload
movq %rax, 72(%rsp)
movl 296(%rsp), %eax
movl %eax, 20(%rsp)
movl 304(%rsp), %eax
movl %eax, 16(%rsp)
movl 312(%rsp), %eax
movl %eax, 12(%rsp)
movl 320(%rsp), %eax
movl %eax, 8(%rsp)
movl %r13d, 4(%rsp)
leaq 88(%rsp), %rax
movq %rax, 112(%rsp)
leaq 80(%rsp), %rax
movq %rax, 120(%rsp)
leaq 72(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
leaq 8(%rsp), %rax
movq %rax, 160(%rsp)
leaq 4(%rsp), %rax
movq %rax, 168(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z11mm3_kernel1PfS_S_iiiii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_2:
orq %r15, %rbp
callq hipDeviceSynchronize
movq %r12, %rdi
movl $1, %esi
movq %rbx, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_4
# %bb.3:
movq 216(%rsp), %rax # 8-byte Reload
movq %rax, 88(%rsp)
movq 224(%rsp), %rax # 8-byte Reload
movq %rax, 80(%rsp)
movq 192(%rsp), %rax # 8-byte Reload
movq %rax, 72(%rsp)
movl 296(%rsp), %eax
movl %eax, 20(%rsp)
movl 304(%rsp), %eax
movl %eax, 16(%rsp)
movl 312(%rsp), %eax
movl %eax, 12(%rsp)
movl 320(%rsp), %eax
movl %eax, 8(%rsp)
movl %r13d, 4(%rsp)
leaq 88(%rsp), %rax
movq %rax, 112(%rsp)
leaq 80(%rsp), %rax
movq %rax, 120(%rsp)
leaq 72(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
leaq 8(%rsp), %rax
movq %rax, 160(%rsp)
leaq 4(%rsp), %rax
movq %rax, 168(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z11mm3_kernel2PfS_S_iiiii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_4:
callq hipDeviceSynchronize
movq %rbp, %rdi
movl $1, %esi
movq %rbx, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_6
# %bb.5:
movq 288(%rsp), %rax
movq 184(%rsp), %rcx # 8-byte Reload
movq %rcx, 88(%rsp)
movq 192(%rsp), %rcx # 8-byte Reload
movq %rcx, 80(%rsp)
movq %rax, 72(%rsp)
movl 296(%rsp), %eax
movl %eax, 20(%rsp)
movl 304(%rsp), %eax
movl %eax, 16(%rsp)
movl 312(%rsp), %eax
movl %eax, 12(%rsp)
movl 320(%rsp), %eax
movl %eax, 8(%rsp)
movl %r13d, 4(%rsp)
leaq 88(%rsp), %rax
movq %rax, 112(%rsp)
leaq 80(%rsp), %rax
movq %rax, 120(%rsp)
leaq 72(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
leaq 8(%rsp), %rax
movq %rax, 160(%rsp)
leaq 4(%rsp), %rax
movq %rax, 168(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z11mm3_kernel3PfS_S_iiiii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_6:
callq hipDeviceSynchronize
movq 96(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 96(%rsp), %rdi
callq hipEventSynchronize
movq 200(%rsp), %rsi
movq 96(%rsp), %rdx
leaq 112(%rsp), %rdi
callq hipEventElapsedTime
movq stdout(%rip), %rdi
movss 112(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %esi
movb $1, %al
callq fprintf
addq $232, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end4:
.size _Z7mm3CudaPfS_S_S_S_S_S_iiiii, .Lfunc_end4-_Z7mm3CudaPfS_S_S_S_S_S_iiiii
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $64, %rsp
.cfi_def_cfa_offset 96
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
cmpl $1, %edi
jg .LBB5_2
# %bb.1:
movl $.Lstr, %edi
callq puts@PLT
movl $1, %ebx
jmp .LBB5_3
.LBB5_2:
movq 8(%rsi), %r14
xorl %ebx, %ebx
movq %r14, %rdi
movq %rsi, %r15
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movl %eax, NI(%rip)
movq %r14, %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movl %eax, NJ(%rip)
movq %r14, %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movl %eax, NK(%rip)
movq 8(%r15), %r14
movq %r14, %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movl %eax, NL(%rip)
movq %r14, %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movl %eax, NM(%rip)
movslq NI(%rip), %rax
movslq NK(%rip), %rsi
imulq %rax, %rsi
shlq $2, %rsi
leaq 32(%rsp), %rdi
movl $1, %edx
callq hipMallocManaged
movslq NK(%rip), %rax
movslq NJ(%rip), %rsi
imulq %rax, %rsi
shlq $2, %rsi
leaq 24(%rsp), %rdi
movl $1, %edx
callq hipMallocManaged
movslq NJ(%rip), %rax
movslq NM(%rip), %rsi
imulq %rax, %rsi
shlq $2, %rsi
leaq 16(%rsp), %rdi
movl $1, %edx
callq hipMallocManaged
movslq NM(%rip), %rax
movslq NL(%rip), %rsi
imulq %rax, %rsi
shlq $2, %rsi
leaq 8(%rsp), %rdi
movl $1, %edx
callq hipMallocManaged
movslq NI(%rip), %rax
movslq NJ(%rip), %rsi
imulq %rax, %rsi
shlq $2, %rsi
leaq 56(%rsp), %rdi
movl $1, %edx
callq hipMallocManaged
movslq NJ(%rip), %rax
movslq NL(%rip), %rsi
imulq %rax, %rsi
shlq $2, %rsi
leaq 48(%rsp), %rdi
movl $1, %edx
callq hipMallocManaged
movslq NI(%rip), %rax
movslq NL(%rip), %rsi
imulq %rax, %rsi
shlq $2, %rsi
leaq 40(%rsp), %rdi
movl $1, %edx
callq hipMallocManaged
movq 32(%rsp), %rdi
movq 24(%rsp), %rsi
movq 16(%rsp), %rdx
movq 8(%rsp), %rcx
callq _Z10init_arrayPfS_S_S_
movq 32(%rsp), %rdi
movq 24(%rsp), %rsi
movq 16(%rsp), %rdx
movq 8(%rsp), %rcx
movq 56(%rsp), %r8
movq 48(%rsp), %r9
movl NI(%rip), %eax
movl NJ(%rip), %r10d
movl NK(%rip), %r11d
movl NL(%rip), %r14d
movl NM(%rip), %r15d
pushq %r15
.cfi_adjust_cfa_offset 8
pushq %r14
.cfi_adjust_cfa_offset 8
pushq %r11
.cfi_adjust_cfa_offset 8
pushq %r10
.cfi_adjust_cfa_offset 8
pushq %rax
.cfi_adjust_cfa_offset 8
pushq 80(%rsp)
.cfi_adjust_cfa_offset 8
callq _Z7mm3CudaPfS_S_S_S_S_S_iiiii
addq $48, %rsp
.cfi_adjust_cfa_offset -48
movq 32(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq 56(%rsp), %rdi
callq hipFree
movq 48(%rsp), %rdi
callq hipFree
movq 40(%rsp), %rdi
callq hipFree
.LBB5_3:
movl %ebx, %eax
addq $64, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end5:
.size main, .Lfunc_end5-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB6_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB6_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11mm3_kernel1PfS_S_iiiii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11mm3_kernel2PfS_S_iiiii, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11mm3_kernel3PfS_S_iiiii, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end6:
.size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB7_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB7_2:
retq
.Lfunc_end7:
.size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor
.cfi_endproc
# -- End function
.type NI,@object # @NI
.bss
.globl NI
.p2align 2, 0x0
NI:
.long 0 # 0x0
.size NI, 4
.type NJ,@object # @NJ
.globl NJ
.p2align 2, 0x0
NJ:
.long 0 # 0x0
.size NJ, 4
.type NK,@object # @NK
.globl NK
.p2align 2, 0x0
NK:
.long 0 # 0x0
.size NK, 4
.type NL,@object # @NL
.globl NL
.p2align 2, 0x0
NL:
.long 0 # 0x0
.size NL, 4
.type NM,@object # @NM
.globl NM
.p2align 2, 0x0
NM:
.long 0 # 0x0
.size NM, 4
.type _Z11mm3_kernel1PfS_S_iiiii,@object # @_Z11mm3_kernel1PfS_S_iiiii
.section .rodata,"a",@progbits
.globl _Z11mm3_kernel1PfS_S_iiiii
.p2align 3, 0x0
_Z11mm3_kernel1PfS_S_iiiii:
.quad _Z26__device_stub__mm3_kernel1PfS_S_iiiii
.size _Z11mm3_kernel1PfS_S_iiiii, 8
.type _Z11mm3_kernel2PfS_S_iiiii,@object # @_Z11mm3_kernel2PfS_S_iiiii
.globl _Z11mm3_kernel2PfS_S_iiiii
.p2align 3, 0x0
_Z11mm3_kernel2PfS_S_iiiii:
.quad _Z26__device_stub__mm3_kernel2PfS_S_iiiii
.size _Z11mm3_kernel2PfS_S_iiiii, 8
.type _Z11mm3_kernel3PfS_S_iiiii,@object # @_Z11mm3_kernel3PfS_S_iiiii
.globl _Z11mm3_kernel3PfS_S_iiiii
.p2align 3, 0x0
_Z11mm3_kernel3PfS_S_iiiii:
.quad _Z26__device_stub__mm3_kernel3PfS_S_iiiii
.size _Z11mm3_kernel3PfS_S_iiiii, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%0.6lf\n"
.size .L.str, 8
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z11mm3_kernel1PfS_S_iiiii"
.size .L__unnamed_1, 27
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z11mm3_kernel2PfS_S_iiiii"
.size .L__unnamed_2, 27
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z11mm3_kernel3PfS_S_iiiii"
.size .L__unnamed_3, 27
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "please no troll"
.size .Lstr, 16
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z26__device_stub__mm3_kernel1PfS_S_iiiii
.addrsig_sym _Z26__device_stub__mm3_kernel2PfS_S_iiiii
.addrsig_sym _Z26__device_stub__mm3_kernel3PfS_S_iiiii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z11mm3_kernel1PfS_S_iiiii
.addrsig_sym _Z11mm3_kernel2PfS_S_iiiii
.addrsig_sym _Z11mm3_kernel3PfS_S_iiiii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00032350_00000000-6_3mm_managed.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2075:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2075:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z10init_arrayPfS_S_S_
.type _Z10init_arrayPfS_S_S_, @function
_Z10init_arrayPfS_S_S_:
.LFB2070:
.cfi_startproc
endbr64
movq %rdi, %r8
movq %rsi, %rdi
movq %rdx, %rsi
movq %rcx, %rdx
movl $0, %r9d
cmpl $0, NI(%rip)
jg .L4
.L5:
movl $0, %r10d
cmpl $0, NK(%rip)
jg .L8
.L9:
movl $0, %r8d
cmpl $0, NJ(%rip)
jg .L12
.L13:
movl $0, %edi
cmpl $0, NM(%rip)
jg .L16
ret
.L6:
imull %r9d, %eax
addl %ecx, %eax
cltq
pxor %xmm0, %xmm0
cvtsi2ssl %ecx, %xmm0
mulss %xmm2, %xmm0
pxor %xmm1, %xmm1
cvtsi2ssl NI(%rip), %xmm1
divss %xmm1, %xmm0
movss %xmm0, (%r8,%rax,4)
addl $1, %ecx
movl NK(%rip), %eax
cmpl %ecx, %eax
jg .L6
.L7:
addl $1, %r9d
cmpl %r9d, NI(%rip)
jle .L5
.L4:
movl NK(%rip), %eax
movl $0, %ecx
testl %eax, %eax
jle .L7
pxor %xmm2, %xmm2
cvtsi2ssl %r9d, %xmm2
jmp .L6
.L10:
movl %ecx, %r9d
addl $1, %ecx
movl %r8d, %eax
imull %r10d, %eax
addl %r9d, %eax
cltq
pxor %xmm0, %xmm0
cvtsi2ssl %ecx, %xmm0
mulss %xmm2, %xmm0
pxor %xmm1, %xmm1
cvtsi2ssl %r8d, %xmm1
divss %xmm1, %xmm0
movss %xmm0, (%rdi,%rax,4)
movl NJ(%rip), %r8d
cmpl %r8d, %ecx
jl .L10
.L11:
addl $1, %r10d
cmpl %r10d, NK(%rip)
jle .L9
.L8:
movl NJ(%rip), %r8d
movl $0, %ecx
testl %r8d, %r8d
jle .L11
pxor %xmm2, %xmm2
cvtsi2ssl %r10d, %xmm2
jmp .L10
.L14:
imull %r8d, %eax
addl %ecx, %eax
cltq
leal 3(%rcx), %edi
pxor %xmm0, %xmm0
cvtsi2ssl %edi, %xmm0
mulss %xmm2, %xmm0
pxor %xmm1, %xmm1
cvtsi2ssl NL(%rip), %xmm1
divss %xmm1, %xmm0
movss %xmm0, (%rsi,%rax,4)
addl $1, %ecx
movl NM(%rip), %eax
cmpl %ecx, %eax
jg .L14
.L15:
addl $1, %r8d
cmpl %r8d, NJ(%rip)
jle .L13
.L12:
movl NM(%rip), %eax
movl $0, %ecx
testl %eax, %eax
jle .L15
pxor %xmm2, %xmm2
cvtsi2ssl %r8d, %xmm2
jmp .L14
.L18:
imull %edi, %eax
addl %ecx, %eax
cltq
leal 2(%rcx), %esi
pxor %xmm0, %xmm0
cvtsi2ssl %esi, %xmm0
mulss %xmm2, %xmm0
pxor %xmm1, %xmm1
cvtsi2ssl NK(%rip), %xmm1
divss %xmm1, %xmm0
movss %xmm0, (%rdx,%rax,4)
addl $1, %ecx
movl NL(%rip), %eax
cmpl %ecx, %eax
jg .L18
.L19:
addl $1, %edi
cmpl %edi, NM(%rip)
jle .L3
.L16:
movl NL(%rip), %eax
movl $0, %ecx
testl %eax, %eax
jle .L19
pxor %xmm2, %xmm2
cvtsi2ssl %edi, %xmm2
jmp .L18
.L3:
ret
.cfi_endproc
.LFE2070:
.size _Z10init_arrayPfS_S_S_, .-_Z10init_arrayPfS_S_S_
.globl _Z40__device_stub__Z11mm3_kernel1PfS_S_iiiiiPfS_S_iiiii
.type _Z40__device_stub__Z11mm3_kernel1PfS_S_iiiiiPfS_S_iiiii, @function
_Z40__device_stub__Z11mm3_kernel1PfS_S_iiiiiPfS_S_iiiii:
.LFB2097:
.cfi_startproc
endbr64
subq $200, %rsp
.cfi_def_cfa_offset 208
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
leaq 208(%rsp), %rax
movq %rax, 160(%rsp)
leaq 216(%rsp), %rax
movq %rax, 168(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L44
.L40:
movq 184(%rsp), %rax
subq %fs:40, %rax
jne .L45
addq $200, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L44:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 216
pushq 56(%rsp)
.cfi_def_cfa_offset 224
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z11mm3_kernel1PfS_S_iiiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 208
jmp .L40
.L45:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2097:
.size _Z40__device_stub__Z11mm3_kernel1PfS_S_iiiiiPfS_S_iiiii, .-_Z40__device_stub__Z11mm3_kernel1PfS_S_iiiiiPfS_S_iiiii
.globl _Z11mm3_kernel1PfS_S_iiiii
.type _Z11mm3_kernel1PfS_S_iiiii, @function
_Z11mm3_kernel1PfS_S_iiiii:
.LFB2098:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z40__device_stub__Z11mm3_kernel1PfS_S_iiiiiPfS_S_iiiii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2098:
.size _Z11mm3_kernel1PfS_S_iiiii, .-_Z11mm3_kernel1PfS_S_iiiii
# Device stub for mm3_kernel2: spills the 6 register args to the stack, builds
# the kernel-argument pointer array (112..168(%rsp)) expected by
# cudaLaunchKernel, then pops the launch configuration that the <<<>>> caller
# pushed via __cudaPushCallConfiguration. If a configuration was present
# (return value 0), it launches the kernel; otherwise it returns.
# Guarded by a stack-protector canary (184(%rsp)).
.globl _Z40__device_stub__Z11mm3_kernel2PfS_S_iiiiiPfS_S_iiiii
.type _Z40__device_stub__Z11mm3_kernel2PfS_S_iiiiiPfS_S_iiiii, @function
_Z40__device_stub__Z11mm3_kernel2PfS_S_iiiiiPfS_S_iiiii:
.LFB2099:
.cfi_startproc
endbr64
subq $200, %rsp
.cfi_def_cfa_offset 208
# spill the register-passed arguments (3 pointers, 3 ints)
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
# build the args[] pointer array: one slot per kernel parameter
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
# last two slots point at the caller's stack-passed ints
leaq 208(%rsp), %rax
movq %rax, 160(%rsp)
leaq 216(%rsp), %rax
movq %rax, 168(%rsp)
# default grid/block dims (1,1,1)/(1,1,1); overwritten by the popped config
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L52
.L48:
# stack-protector check before returning
movq 184(%rsp), %rax
subq %fs:40, %rax
jne .L53
addq $200, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L52:
.cfi_restore_state
# push sharedMem and stream, then launch
pushq 56(%rsp)
.cfi_def_cfa_offset 216
pushq 56(%rsp)
.cfi_def_cfa_offset 224
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z11mm3_kernel2PfS_S_iiiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 208
jmp .L48
.L53:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2099:
.size _Z40__device_stub__Z11mm3_kernel2PfS_S_iiiiiPfS_S_iiiii, .-_Z40__device_stub__Z11mm3_kernel2PfS_S_iiiiiPfS_S_iiiii
# Host-side launch trampoline for mm3_kernel2 — identical shape to the
# mm3_kernel1 trampoline: re-push the two stack-passed ints and delegate to
# the __device_stub__, which performs the cudaLaunchKernel call.
.globl _Z11mm3_kernel2PfS_S_iiiii
.type _Z11mm3_kernel2PfS_S_iiiii, @function
_Z11mm3_kernel2PfS_S_iiiii:
.LFB2100:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z40__device_stub__Z11mm3_kernel2PfS_S_iiiiiPfS_S_iiiii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2100:
.size _Z11mm3_kernel2PfS_S_iiiii, .-_Z11mm3_kernel2PfS_S_iiiii
# Device stub for mm3_kernel3 — same layout as the kernel2 stub: spill args,
# build the cudaLaunchKernel argument-pointer array, pop the pushed launch
# configuration, and launch the kernel if one was pending.
.globl _Z40__device_stub__Z11mm3_kernel3PfS_S_iiiiiPfS_S_iiiii
.type _Z40__device_stub__Z11mm3_kernel3PfS_S_iiiiiPfS_S_iiiii, @function
_Z40__device_stub__Z11mm3_kernel3PfS_S_iiiiiPfS_S_iiiii:
.LFB2101:
.cfi_startproc
endbr64
subq $200, %rsp
.cfi_def_cfa_offset 208
# spill register-passed arguments
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
# argument-pointer array for cudaLaunchKernel
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
leaq 208(%rsp), %rax
movq %rax, 160(%rsp)
leaq 216(%rsp), %rax
movq %rax, 168(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L60
.L56:
movq 184(%rsp), %rax
subq %fs:40, %rax
jne .L61
addq $200, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L60:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 216
pushq 56(%rsp)
.cfi_def_cfa_offset 224
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z11mm3_kernel3PfS_S_iiiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 208
jmp .L56
.L61:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2101:
.size _Z40__device_stub__Z11mm3_kernel3PfS_S_iiiiiPfS_S_iiiii, .-_Z40__device_stub__Z11mm3_kernel3PfS_S_iiiiiPfS_S_iiiii
# Host-side launch trampoline for mm3_kernel3 — re-push the two stack-passed
# ints and delegate to the __device_stub__ for the actual launch.
.globl _Z11mm3_kernel3PfS_S_iiiii
.type _Z11mm3_kernel3PfS_S_iiiii, @function
_Z11mm3_kernel3PfS_S_iiiii:
.LFB2102:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z40__device_stub__Z11mm3_kernel3PfS_S_iiiiiPfS_S_iiiii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2102:
.size _Z11mm3_kernel3PfS_S_iiiii, .-_Z11mm3_kernel3PfS_S_iiiii
.section .rodata.str1.1,"aMS",@progbits,1
.LC6:
.string "%0.6lf\n"
.text
# mm3Cuda(float* A, float* B, float* C, float* D, float* E, float* F, float* G,
#         int, int, int, int, int)  — GCC/nvcc host path.
# Sets block dims to (32, 8, 1), computes two grid-dim pairs from the int
# parameters via inlined float ceil(dim * 1/32) / ceil(dim * 1/8) sequences
# (.LC0 = 1/32, .LC5 = 1/8; the comiss/.LC4 branches handle the unsigned
# float->int64 conversion overflow path), creates two CUDA events bracketing
# the three kernel launches (kernel1, kernel2, kernel3 — each pushed via
# __cudaPushCallConfiguration and followed by cudaDeviceSynchronize), then
# prints the elapsed milliseconds with fprintf("%0.6lf\n").
.globl _Z7mm3CudaPfS_S_S_S_S_S_iiiii
.type _Z7mm3CudaPfS_S_S_S_S_S_iiiii, @function
_Z7mm3CudaPfS_S_S_S_S_S_iiiii:
.LFB2071:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $104, %rsp
.cfi_def_cfa_offset 160
# stash pointer args in callee-saved registers / stack
movq %rdi, %r13
movq %rsi, %r14
movq %rdx, 8(%rsp)
movq %rcx, %r12
movq %r8, %rbx
movq %r9, %rbp
movq 160(%rsp), %r15
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
# block dims: (32, 8, 1)
movl $32, 40(%rsp)
movl $8, 44(%rsp)
movl $1, 48(%rsp)
# ceil(arg * 1/32) — inlined ceilf for the first grid dimension
pxor %xmm0, %xmm0
cvtsi2ssl 168(%rsp), %xmm0
mulss .LC0(%rip), %xmm0
movaps %xmm0, %xmm1
movss .LC7(%rip), %xmm3
movaps %xmm0, %xmm2
andps %xmm3, %xmm2
movss .LC1(%rip), %xmm4
ucomiss %xmm2, %xmm4
jbe .L65
cvttss2sil %xmm0, %eax
pxor %xmm2, %xmm2
cvtsi2ssl %eax, %xmm2
cmpnless %xmm2, %xmm1
movss .LC3(%rip), %xmm4
andps %xmm4, %xmm1
addss %xmm2, %xmm1
andnps %xmm0, %xmm3
orps %xmm3, %xmm1
.L65:
comiss .LC4(%rip), %xmm1
jnb .L66
cvttss2siq %xmm1, %rax
.L67:
# ceil(arg * 1/8) for the paired grid dimension
pxor %xmm0, %xmm0
cvtsi2ssl 176(%rsp), %xmm0
movaps %xmm0, %xmm3
mulss .LC5(%rip), %xmm3
movaps %xmm3, %xmm1
movss .LC7(%rip), %xmm4
movaps %xmm3, %xmm2
andps %xmm4, %xmm2
movss .LC1(%rip), %xmm5
ucomiss %xmm2, %xmm5
jbe .L68
cvttss2sil %xmm3, %edx
pxor %xmm2, %xmm2
cvtsi2ssl %edx, %xmm2
cmpnless %xmm2, %xmm1
movss .LC3(%rip), %xmm5
andps %xmm5, %xmm1
addss %xmm2, %xmm1
andnps %xmm3, %xmm4
orps %xmm4, %xmm1
.L68:
comiss .LC4(%rip), %xmm1
jnb .L69
cvttss2siq %xmm1, %rdx
.L70:
# grid dims for kernel1 launch
movl %edx, 52(%rsp)
movl %eax, 56(%rsp)
movl $1, 60(%rsp)
# grid dims for kernel2 / kernel3, same inlined ceilf pattern
mulss .LC0(%rip), %xmm0
movaps %xmm0, %xmm1
movss .LC7(%rip), %xmm3
movaps %xmm0, %xmm2
andps %xmm3, %xmm2
movss .LC1(%rip), %xmm4
ucomiss %xmm2, %xmm4
jbe .L71
cvttss2sil %xmm0, %edx
pxor %xmm2, %xmm2
cvtsi2ssl %edx, %xmm2
cmpnless %xmm2, %xmm1
movss .LC3(%rip), %xmm4
andps %xmm4, %xmm1
addss %xmm2, %xmm1
andnps %xmm0, %xmm3
orps %xmm3, %xmm1
.L71:
movaps %xmm1, %xmm0
pxor %xmm1, %xmm1
cvtsi2ssl 192(%rsp), %xmm1
mulss .LC5(%rip), %xmm1
movaps %xmm1, %xmm2
movss .LC7(%rip), %xmm4
movaps %xmm1, %xmm3
andps %xmm4, %xmm3
movss .LC1(%rip), %xmm5
ucomiss %xmm3, %xmm5
jbe .L72
cvttss2sil %xmm1, %edx
pxor %xmm3, %xmm3
cvtsi2ssl %edx, %xmm3
cmpnless %xmm3, %xmm2
movss .LC3(%rip), %xmm5
andps %xmm5, %xmm2
addss %xmm3, %xmm2
andnps %xmm1, %xmm4
orps %xmm4, %xmm2
.L72:
comiss .LC4(%rip), %xmm2
jnb .L73
cvttss2siq %xmm2, %rdx
.L74:
movl %edx, %ecx
movl %edx, 64(%rsp)
comiss .LC4(%rip), %xmm0
jnb .L75
cvttss2siq %xmm0, %rdx
.L76:
movl %edx, 68(%rsp)
movl $1, 72(%rsp)
movl %ecx, 76(%rsp)
movl %eax, 80(%rsp)
movl $1, 84(%rsp)
# timing events around the three launches
leaq 24(%rsp), %rdi
call cudaEventCreate@PLT
leaq 32(%rsp), %rdi
call cudaEventCreate@PLT
movl $0, %esi
movq 24(%rsp), %rdi
call cudaEventRecord@PLT
# launch 1: mm3_kernel1
movl 48(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 40(%rsp), %rdx
movq 52(%rsp), %rdi
movl 60(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L82
.L77:
call cudaDeviceSynchronize@PLT
# launch 2: mm3_kernel2
movl 48(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 40(%rsp), %rdx
movq 64(%rsp), %rdi
movl 72(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L83
.L78:
call cudaDeviceSynchronize@PLT
# launch 3: mm3_kernel3
movl 48(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 40(%rsp), %rdx
movq 76(%rsp), %rdi
movl 84(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L84
.L79:
call cudaDeviceSynchronize@PLT
movl $0, %esi
movq 32(%rsp), %rdi
call cudaEventRecord@PLT
movq 32(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 20(%rsp), %rdi
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
call cudaEventElapsedTime@PLT
# print elapsed ms as a double with "%0.6lf\n"
pxor %xmm0, %xmm0
cvtss2sd 20(%rsp), %xmm0
leaq .LC6(%rip), %rdx
movl $2, %esi
movq stdout(%rip), %rdi
movl $1, %eax
call __fprintf_chk@PLT
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L85
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L66:
.cfi_restore_state
# float >= 2^63: convert via (x - 2^63) then flip the sign bit
subss .LC4(%rip), %xmm1
cvttss2siq %xmm1, %rax
btcq $63, %rax
jmp .L67
.L69:
subss .LC4(%rip), %xmm1
cvttss2siq %xmm1, %rdx
btcq $63, %rdx
jmp .L70
.L73:
subss .LC4(%rip), %xmm2
cvttss2siq %xmm2, %rdx
btcq $63, %rdx
jmp .L74
.L75:
subss .LC4(%rip), %xmm0
cvttss2siq %xmm0, %rdx
btcq $63, %rdx
jmp .L76
.L82:
# config accepted: forward args to the kernel1 trampoline stub
movl 200(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 168
movl 200(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 176
movl 200(%rsp), %r9d
movl 192(%rsp), %r8d
movl 184(%rsp), %ecx
movq %rbx, %rdx
movq %r14, %rsi
movq %r13, %rdi
call _Z40__device_stub__Z11mm3_kernel1PfS_S_iiiiiPfS_S_iiiii
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L77
.L83:
movl 200(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 168
movl 200(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 176
movl 200(%rsp), %r9d
movl 192(%rsp), %r8d
movl 184(%rsp), %ecx
movq %rbp, %rdx
movq %r12, %rsi
movq 24(%rsp), %rdi
call _Z40__device_stub__Z11mm3_kernel2PfS_S_iiiiiPfS_S_iiiii
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L78
.L84:
movl 200(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 168
movl 200(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 176
movl 200(%rsp), %r9d
movl 192(%rsp), %r8d
movl 184(%rsp), %ecx
movq %r15, %rdx
movq %rbp, %rsi
movq %rbx, %rdi
call _Z40__device_stub__Z11mm3_kernel3PfS_S_iiiiiPfS_S_iiiii
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L79
.L85:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2071:
.size _Z7mm3CudaPfS_S_S_S_S_S_iiiii, .-_Z7mm3CudaPfS_S_S_S_S_S_iiiii
.section .rodata.str1.1
.LC8:
.string "please no troll\n"
.text
# main (GCC/nvcc host path).
# With <2 argv entries prints "please no troll\n" and returns 1. Otherwise it
# parses argv[1] five times into NI, NJ, NK, NL, NM (note: argv[1] is reread
# for all five — all globals get the same value), allocates seven managed
# buffers sized from products of those globals, calls init_array and mm3Cuda,
# frees everything and returns 0. Stack-protector guarded.
.globl main
.type main, @function
main:
.LFB2072:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $64, %rsp
.cfi_def_cfa_offset 80
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
cmpl $1, %edi
jle .L91
movq %rsi, %rbx
# strtol(argv[1], 0, 10) repeated for each dimension global
movq 8(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, NI(%rip)
movq 8(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, NJ(%rip)
movq 8(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, NK(%rip)
movq 8(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, NL(%rip)
movq 8(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, NM(%rip)
# seven cudaMallocManaged calls, each 4 bytes per element
movl NI(%rip), %esi
imull NK(%rip), %esi
movslq %esi, %rsi
salq $2, %rsi
movq %rsp, %rdi
movl $1, %edx
call cudaMallocManaged@PLT
movl NK(%rip), %esi
imull NJ(%rip), %esi
movslq %esi, %rsi
salq $2, %rsi
leaq 8(%rsp), %rdi
movl $1, %edx
call cudaMallocManaged@PLT
movl NJ(%rip), %esi
imull NM(%rip), %esi
movslq %esi, %rsi
salq $2, %rsi
leaq 16(%rsp), %rdi
movl $1, %edx
call cudaMallocManaged@PLT
movl NM(%rip), %esi
imull NL(%rip), %esi
movslq %esi, %rsi
salq $2, %rsi
leaq 24(%rsp), %rdi
movl $1, %edx
call cudaMallocManaged@PLT
movl NI(%rip), %esi
imull NJ(%rip), %esi
movslq %esi, %rsi
salq $2, %rsi
leaq 32(%rsp), %rdi
movl $1, %edx
call cudaMallocManaged@PLT
movl NJ(%rip), %esi
imull NL(%rip), %esi
movslq %esi, %rsi
salq $2, %rsi
leaq 40(%rsp), %rdi
movl $1, %edx
call cudaMallocManaged@PLT
movl NI(%rip), %esi
imull NL(%rip), %esi
movslq %esi, %rsi
salq $2, %rsi
leaq 48(%rsp), %rdi
movl $1, %edx
call cudaMallocManaged@PLT
# init_array(A, B, C, D)
movq 24(%rsp), %rcx
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movq (%rsp), %rdi
call _Z10init_arrayPfS_S_S_
# mm3Cuda(A..G, NI, NJ, NK, NL, NM): 6 regs + 6 stack args
movl NM(%rip), %eax
pushq %rax
.cfi_def_cfa_offset 88
movl NL(%rip), %eax
pushq %rax
.cfi_def_cfa_offset 96
movl NK(%rip), %eax
pushq %rax
.cfi_def_cfa_offset 104
movl NJ(%rip), %eax
pushq %rax
.cfi_def_cfa_offset 112
movl NI(%rip), %eax
pushq %rax
.cfi_def_cfa_offset 120
pushq 88(%rsp)
.cfi_def_cfa_offset 128
movq 88(%rsp), %r9
movq 80(%rsp), %r8
movq 72(%rsp), %rcx
movq 64(%rsp), %rdx
movq 56(%rsp), %rsi
movq 48(%rsp), %rdi
call _Z7mm3CudaPfS_S_S_S_S_S_iiiii
addq $48, %rsp
.cfi_def_cfa_offset 80
# release all seven buffers
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 48(%rsp), %rdi
call cudaFree@PLT
movl $0, %eax
.L86:
movq 56(%rsp), %rdx
subq %fs:40, %rdx
jne .L92
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L91:
.cfi_restore_state
# usage message when no argument given
leaq .LC8(%rip), %rsi
movl $2, %edi
call __printf_chk@PLT
movl $1, %eax
jmp .L86
.L92:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2072:
.size main, .-main
.section .rodata.str1.1
.LC9:
.string "_Z11mm3_kernel3PfS_S_iiiii"
.LC10:
.string "_Z11mm3_kernel2PfS_S_iiiii"
.LC11:
.string "_Z11mm3_kernel1PfS_S_iiiii"
.text
# Static initializer (.init_array) that registers the embedded fat binary and
# the three kernel entry points with the CUDA runtime, then registers the
# unregister routine with atexit.
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2104:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
# register mm3_kernel3 (host stub address + mangled device name)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC9(%rip), %rdx
movq %rdx, %rcx
leaq _Z11mm3_kernel3PfS_S_iiiii(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
# register mm3_kernel2
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _Z11mm3_kernel2PfS_S_iiiii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
# register mm3_kernel1
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC11(%rip), %rdx
movq %rdx, %rcx
leaq _Z11mm3_kernel1PfS_S_iiiii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2104:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl NM
.bss
.align 4
.type NM, @object
.size NM, 4
NM:
.zero 4
.globl NL
.align 4
.type NL, @object
.size NL, 4
NL:
.zero 4
.globl NK
.align 4
.type NK, @object
.size NK, 4
NK:
.zero 4
.globl NJ
.align 4
.type NJ, @object
.size NJ, 4
NJ:
.zero 4
.globl NI
.align 4
.type NI, @object
.size NI, 4
NI:
.zero 4
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1040187392
.align 4
.LC1:
.long 1258291200
.align 4
.LC3:
.long 1065353216
.align 4
.LC4:
.long 1593835520
.align 4
.LC5:
.long 1023410176
.align 4
.LC7:
.long 2147483647
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "3mm_managed.hip"
# init_array(float* A, float* B, float* C, float* D) — clang/HIP host path.
# Four doubly-nested loops fill the matrices with float values of the form
# (i * f(j)) / denom, where the inner offset and denominator differ per matrix:
#   A[i*NK+j]  = i *  j    / NI      (loops over NI x NK)
#   B[i*NJ+j]  = i * (j+1) / NJ      (loops over NK x NJ)
#   C[i*NM+j]  = i * (j+3) / NL      (loops over NJ x NM)
#   D[i*NL+j]  = i * (j+2) / NK      (loops over NM x NL)
# Dimensions are read from the NI..NM globals; empty dims are skipped.
.globl _Z10init_arrayPfS_S_S_ # -- Begin function _Z10init_arrayPfS_S_S_
.p2align 4, 0x90
.type _Z10init_arrayPfS_S_S_,@function
_Z10init_arrayPfS_S_S_: # @_Z10init_arrayPfS_S_S_
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset %rbx, -16
movl NI(%rip), %eax
testl %eax, %eax
jle .LBB0_6
# %bb.1: # %.preheader48.lr.ph
cvtsi2ss %eax, %xmm0
movl NK(%rip), %r8d
xorl %r9d, %r9d
xorl %r10d, %r10d
jmp .LBB0_2
.p2align 4, 0x90
.LBB0_5: # %._crit_edge
# in Loop: Header=BB0_2 Depth=1
incq %r10
addl %r8d, %r9d
cmpq %rax, %r10
je .LBB0_6
.LBB0_2: # %.preheader48
# =>This Loop Header: Depth=1
# Child Loop BB0_4 Depth 2
testl %r8d, %r8d
jle .LBB0_5
# %bb.3: # %.lr.ph
# in Loop: Header=BB0_2 Depth=1
movl %r9d, %r11d
leaq (%rdi,%r11,4), %r11
xorps %xmm1, %xmm1
cvtsi2ss %r10d, %xmm1
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB0_4: # Parent Loop BB0_2 Depth=1
# => This Inner Loop Header: Depth=2
# A[i][j] = i * j / NI
xorps %xmm2, %xmm2
cvtsi2ss %ebx, %xmm2
mulss %xmm1, %xmm2
divss %xmm0, %xmm2
movss %xmm2, (%r11,%rbx,4)
incq %rbx
cmpq %rbx, %r8
jne .LBB0_4
jmp .LBB0_5
.LBB0_6: # %.preheader47
movl NK(%rip), %eax
testl %eax, %eax
jle .LBB0_12
# %bb.7: # %.preheader46.lr.ph
movl NJ(%rip), %edi
xorps %xmm0, %xmm0
cvtsi2ss %edi, %xmm0
xorl %r8d, %r8d
xorl %r9d, %r9d
jmp .LBB0_8
.p2align 4, 0x90
.LBB0_11: # %._crit_edge53
# in Loop: Header=BB0_8 Depth=1
incq %r9
addl %edi, %r8d
cmpq %rax, %r9
je .LBB0_12
.LBB0_8: # %.preheader46
# =>This Loop Header: Depth=1
# Child Loop BB0_10 Depth 2
testl %edi, %edi
jle .LBB0_11
# %bb.9: # %.lr.ph52
# in Loop: Header=BB0_8 Depth=1
movl %r8d, %r10d
leaq (%rsi,%r10,4), %r10
xorps %xmm1, %xmm1
cvtsi2ss %r9d, %xmm1
xorl %r11d, %r11d
.p2align 4, 0x90
.LBB0_10: # Parent Loop BB0_8 Depth=1
# => This Inner Loop Header: Depth=2
# B[i][j] = i * (j+1) / NJ
leaq 1(%r11), %rbx
xorps %xmm2, %xmm2
cvtsi2ss %ebx, %xmm2
mulss %xmm1, %xmm2
divss %xmm0, %xmm2
movss %xmm2, (%r10,%r11,4)
movq %rbx, %r11
cmpq %rbx, %rdi
jne .LBB0_10
jmp .LBB0_11
.LBB0_12: # %.preheader45
movl NJ(%rip), %esi
testl %esi, %esi
jle .LBB0_18
# %bb.13: # %.preheader44.lr.ph
xorps %xmm0, %xmm0
cvtsi2ssl NL(%rip), %xmm0
movl NM(%rip), %edi
xorl %r8d, %r8d
xorl %r9d, %r9d
jmp .LBB0_14
.p2align 4, 0x90
.LBB0_17: # %._crit_edge57
# in Loop: Header=BB0_14 Depth=1
incq %r9
addl %edi, %r8d
cmpq %rsi, %r9
je .LBB0_18
.LBB0_14: # %.preheader44
# =>This Loop Header: Depth=1
# Child Loop BB0_16 Depth 2
testl %edi, %edi
jle .LBB0_17
# %bb.15: # %.lr.ph56
# in Loop: Header=BB0_14 Depth=1
movl %r8d, %r10d
leaq (%rdx,%r10,4), %r10
xorps %xmm1, %xmm1
cvtsi2ss %r9d, %xmm1
xorl %r11d, %r11d
.p2align 4, 0x90
.LBB0_16: # Parent Loop BB0_14 Depth=1
# => This Inner Loop Header: Depth=2
# C[i][j] = i * (j+3) / NL
leal 3(%r11), %ebx
xorps %xmm2, %xmm2
cvtsi2ss %ebx, %xmm2
mulss %xmm1, %xmm2
divss %xmm0, %xmm2
movss %xmm2, (%r10,%r11,4)
incq %r11
cmpq %r11, %rdi
jne .LBB0_16
jmp .LBB0_17
.LBB0_18: # %.preheader43
movl NM(%rip), %edx
testl %edx, %edx
jle .LBB0_24
# %bb.19: # %.preheader.lr.ph
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movl NL(%rip), %eax
xorl %esi, %esi
xorl %edi, %edi
jmp .LBB0_20
.p2align 4, 0x90
.LBB0_23: # %._crit_edge61
# in Loop: Header=BB0_20 Depth=1
incq %rdi
addl %eax, %esi
cmpq %rdx, %rdi
je .LBB0_24
.LBB0_20: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB0_22 Depth 2
testl %eax, %eax
jle .LBB0_23
# %bb.21: # %.lr.ph60
# in Loop: Header=BB0_20 Depth=1
movl %esi, %r8d
leaq (%rcx,%r8,4), %r8
xorps %xmm1, %xmm1
cvtsi2ss %edi, %xmm1
xorl %r9d, %r9d
.p2align 4, 0x90
.LBB0_22: # Parent Loop BB0_20 Depth=1
# => This Inner Loop Header: Depth=2
# D[i][j] = i * (j+2) / NK
leal 2(%r9), %r10d
xorps %xmm2, %xmm2
cvtsi2ss %r10d, %xmm2
mulss %xmm1, %xmm2
divss %xmm0, %xmm2
movss %xmm2, (%r8,%r9,4)
incq %r9
cmpq %r9, %rax
jne .LBB0_22
jmp .LBB0_23
.LBB0_24: # %._crit_edge63
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z10init_arrayPfS_S_S_, .Lfunc_end0-_Z10init_arrayPfS_S_S_
.cfi_endproc
# -- End function
# HIP device stub for mm3_kernel1: spills the 6 register args, builds the
# kernel-argument pointer array (96..152(%rsp)) — the last two slots alias the
# caller's stack-passed ints — pops the launch configuration pushed by
# __hipPushCallConfiguration, and invokes hipLaunchKernel.
.globl _Z26__device_stub__mm3_kernel1PfS_S_iiiii # -- Begin function _Z26__device_stub__mm3_kernel1PfS_S_iiiii
.p2align 4, 0x90
.type _Z26__device_stub__mm3_kernel1PfS_S_iiiii,@function
_Z26__device_stub__mm3_kernel1PfS_S_iiiii: # @_Z26__device_stub__mm3_kernel1PfS_S_iiiii
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 20(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 176(%rsp), %rax
movq %rax, 144(%rsp)
leaq 184(%rsp), %rax
movq %rax, 152(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
# grid dim, block dim, args[], shared-mem size, stream -> hipLaunchKernel
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z11mm3_kernel1PfS_S_iiiii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end1:
.size _Z26__device_stub__mm3_kernel1PfS_S_iiiii, .Lfunc_end1-_Z26__device_stub__mm3_kernel1PfS_S_iiiii
.cfi_endproc
# -- End function
# HIP device stub for mm3_kernel2 — identical layout to the mm3_kernel1 stub;
# only the target kernel symbol differs.
.globl _Z26__device_stub__mm3_kernel2PfS_S_iiiii # -- Begin function _Z26__device_stub__mm3_kernel2PfS_S_iiiii
.p2align 4, 0x90
.type _Z26__device_stub__mm3_kernel2PfS_S_iiiii,@function
_Z26__device_stub__mm3_kernel2PfS_S_iiiii: # @_Z26__device_stub__mm3_kernel2PfS_S_iiiii
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 20(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 176(%rsp), %rax
movq %rax, 144(%rsp)
leaq 184(%rsp), %rax
movq %rax, 152(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z11mm3_kernel2PfS_S_iiiii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end2:
.size _Z26__device_stub__mm3_kernel2PfS_S_iiiii, .Lfunc_end2-_Z26__device_stub__mm3_kernel2PfS_S_iiiii
.cfi_endproc
# -- End function
# HIP device stub for mm3_kernel3 — identical layout to the mm3_kernel1 stub;
# only the target kernel symbol differs.
.globl _Z26__device_stub__mm3_kernel3PfS_S_iiiii # -- Begin function _Z26__device_stub__mm3_kernel3PfS_S_iiiii
.p2align 4, 0x90
.type _Z26__device_stub__mm3_kernel3PfS_S_iiiii,@function
_Z26__device_stub__mm3_kernel3PfS_S_iiiii: # @_Z26__device_stub__mm3_kernel3PfS_S_iiiii
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 20(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 176(%rsp), %rax
movq %rax, 144(%rsp)
leaq 184(%rsp), %rax
movq %rax, 152(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z11mm3_kernel3PfS_S_iiiii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end3:
.size _Z26__device_stub__mm3_kernel3PfS_S_iiiii, .Lfunc_end3-_Z26__device_stub__mm3_kernel3PfS_S_iiiii
.cfi_endproc
# -- End function
# mm3Cuda — clang/HIP host path. Block dims (32, 8, 1) are packed into %rbx as
# 0x800000020. Grid dims are ceil(n/32) (.LCPI4_0 = 1/32) and ceil(n/8)
# (.LCPI4_2 = 1/8) via calls to ceilf; the sar/subss/.LCPI4_1 dance is the
# float -> unsigned 64-bit conversion fix-up for values >= 2^63. Two hip
# events bracket the three kernel launches; each launch pushes its config via
# __hipPushCallConfiguration and, if accepted (return 0), inlines the device
# stub body (argument array + __hipPopCallConfiguration + hipLaunchKernel),
# followed by hipDeviceSynchronize. Elapsed ms is printed via fprintf.
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z7mm3CudaPfS_S_S_S_S_S_iiiii
.LCPI4_0:
.long 0x3d000000 # float 0.03125
.LCPI4_1:
.long 0x5f000000 # float 9.22337203E+18
.LCPI4_2:
.long 0x3e000000 # float 0.125
.text
.globl _Z7mm3CudaPfS_S_S_S_S_S_iiiii
.p2align 4, 0x90
.type _Z7mm3CudaPfS_S_S_S_S_S_iiiii,@function
_Z7mm3CudaPfS_S_S_S_S_S_iiiii: # @_Z7mm3CudaPfS_S_S_S_S_S_iiiii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $232, %rsp
.cfi_def_cfa_offset 288
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %r9, 192(%rsp) # 8-byte Spill
movq %r8, 184(%rsp) # 8-byte Spill
movq %rcx, 224(%rsp) # 8-byte Spill
movq %rdx, 216(%rsp) # 8-byte Spill
movq %rsi, 208(%rsp) # 8-byte Spill
movq %rdi, %r14
cvtsi2ssl 304(%rsp), %xmm0
movss %xmm0, 108(%rsp) # 4-byte Spill
movabsq $34359738400, %rbx # imm = 0x800000020
mulss .LCPI4_0(%rip), %xmm0
callq ceilf@PLT
# grid dims for launch 1: ceil(a/32) | ceil(b/8) << 32
cvttss2si %xmm0, %r13
movq %r13, %rax
subss .LCPI4_1(%rip), %xmm0
cvttss2si %xmm0, %rcx
sarq $63, %rax
xorps %xmm0, %xmm0
cvtsi2ssl 296(%rsp), %xmm0
andl %eax, %ecx
orl %ecx, %r13d
mulss .LCPI4_2(%rip), %xmm0
callq ceilf@PLT
cvttss2si %xmm0, %r15
movq %r15, %rax
sarq $63, %rax
subss .LCPI4_1(%rip), %xmm0
cvttss2si %xmm0, %rcx
andl %eax, %ecx
orl %ecx, %r15d
shlq $32, %r15
orq %r15, %r13
xorps %xmm0, %xmm0
cvtsi2ssl 320(%rsp), %xmm0
mulss .LCPI4_0(%rip), %xmm0
callq ceilf@PLT
cvttss2si %xmm0, %rbp
movq %rbp, %rax
subss .LCPI4_1(%rip), %xmm0
cvttss2si %xmm0, %rcx
sarq $63, %rax
andl %eax, %ecx
orl %ecx, %ebp
movss 108(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
mulss .LCPI4_2(%rip), %xmm0
callq ceilf@PLT
cvttss2si %xmm0, %r12
movq %r12, %rax
sarq $63, %rax
subss .LCPI4_1(%rip), %xmm0
cvttss2si %xmm0, %rcx
andl %eax, %ecx
orl %ecx, %r12d
shlq $32, %r12
orq %rbp, %r12
# start/stop events
leaq 200(%rsp), %rdi
callq hipEventCreate
leaq 96(%rsp), %rdi
callq hipEventCreate
movq 200(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
# launch 1: mm3_kernel1
movq %r13, %rdi
movl 328(%rsp), %r13d
movl $1, %esi
movq %rbx, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_2
# %bb.1:
movq %r14, 88(%rsp)
movq 208(%rsp), %rax # 8-byte Reload
movq %rax, 80(%rsp)
movq 184(%rsp), %rax # 8-byte Reload
movq %rax, 72(%rsp)
movl 296(%rsp), %eax
movl %eax, 20(%rsp)
movl 304(%rsp), %eax
movl %eax, 16(%rsp)
movl 312(%rsp), %eax
movl %eax, 12(%rsp)
movl 320(%rsp), %eax
movl %eax, 8(%rsp)
movl %r13d, 4(%rsp)
leaq 88(%rsp), %rax
movq %rax, 112(%rsp)
leaq 80(%rsp), %rax
movq %rax, 120(%rsp)
leaq 72(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
leaq 8(%rsp), %rax
movq %rax, 160(%rsp)
leaq 4(%rsp), %rax
movq %rax, 168(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z11mm3_kernel1PfS_S_iiiii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_2:
orq %r15, %rbp
callq hipDeviceSynchronize
# launch 2: mm3_kernel2
movq %r12, %rdi
movl $1, %esi
movq %rbx, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_4
# %bb.3:
movq 216(%rsp), %rax # 8-byte Reload
movq %rax, 88(%rsp)
movq 224(%rsp), %rax # 8-byte Reload
movq %rax, 80(%rsp)
movq 192(%rsp), %rax # 8-byte Reload
movq %rax, 72(%rsp)
movl 296(%rsp), %eax
movl %eax, 20(%rsp)
movl 304(%rsp), %eax
movl %eax, 16(%rsp)
movl 312(%rsp), %eax
movl %eax, 12(%rsp)
movl 320(%rsp), %eax
movl %eax, 8(%rsp)
movl %r13d, 4(%rsp)
leaq 88(%rsp), %rax
movq %rax, 112(%rsp)
leaq 80(%rsp), %rax
movq %rax, 120(%rsp)
leaq 72(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
leaq 8(%rsp), %rax
movq %rax, 160(%rsp)
leaq 4(%rsp), %rax
movq %rax, 168(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z11mm3_kernel2PfS_S_iiiii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_4:
callq hipDeviceSynchronize
# launch 3: mm3_kernel3
movq %rbp, %rdi
movl $1, %esi
movq %rbx, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_6
# %bb.5:
movq 288(%rsp), %rax
movq 184(%rsp), %rcx # 8-byte Reload
movq %rcx, 88(%rsp)
movq 192(%rsp), %rcx # 8-byte Reload
movq %rcx, 80(%rsp)
movq %rax, 72(%rsp)
movl 296(%rsp), %eax
movl %eax, 20(%rsp)
movl 304(%rsp), %eax
movl %eax, 16(%rsp)
movl 312(%rsp), %eax
movl %eax, 12(%rsp)
movl 320(%rsp), %eax
movl %eax, 8(%rsp)
movl %r13d, 4(%rsp)
leaq 88(%rsp), %rax
movq %rax, 112(%rsp)
leaq 80(%rsp), %rax
movq %rax, 120(%rsp)
leaq 72(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
leaq 8(%rsp), %rax
movq %rax, 160(%rsp)
leaq 4(%rsp), %rax
movq %rax, 168(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z11mm3_kernel3PfS_S_iiiii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_6:
callq hipDeviceSynchronize
# record stop event, measure elapsed ms, print
movq 96(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 96(%rsp), %rdi
callq hipEventSynchronize
movq 200(%rsp), %rsi
movq 96(%rsp), %rdx
leaq 112(%rsp), %rdi
callq hipEventElapsedTime
movq stdout(%rip), %rdi
movss 112(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %esi
movb $1, %al
callq fprintf
addq $232, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end4:
.size _Z7mm3CudaPfS_S_S_S_S_S_iiiii, .Lfunc_end4-_Z7mm3CudaPfS_S_S_S_S_S_iiiii
.cfi_endproc
# -- End function
# main — clang/HIP host path. Mirrors the GCC version: with <2 args prints
# the usage string and returns 1; otherwise parses argv[1] repeatedly into
# NI..NM (argv[1] is re-read for every global), allocates seven managed
# buffers with hipMallocManaged (4 bytes/elem), runs init_array and mm3Cuda,
# then frees all buffers and returns 0.
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $64, %rsp
.cfi_def_cfa_offset 96
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
cmpl $1, %edi
jg .LBB5_2
# %bb.1:
# argc <= 1: usage message, exit code 1
movl $.Lstr, %edi
callq puts@PLT
movl $1, %ebx
jmp .LBB5_3
.LBB5_2:
# parse argv[1] into each dimension global
movq 8(%rsi), %r14
xorl %ebx, %ebx
movq %r14, %rdi
movq %rsi, %r15
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movl %eax, NI(%rip)
movq %r14, %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movl %eax, NJ(%rip)
movq %r14, %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movl %eax, NK(%rip)
movq 8(%r15), %r14
movq %r14, %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movl %eax, NL(%rip)
movq %r14, %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movl %eax, NM(%rip)
# seven hipMallocManaged allocations sized from the globals
movslq NI(%rip), %rax
movslq NK(%rip), %rsi
imulq %rax, %rsi
shlq $2, %rsi
leaq 32(%rsp), %rdi
movl $1, %edx
callq hipMallocManaged
movslq NK(%rip), %rax
movslq NJ(%rip), %rsi
imulq %rax, %rsi
shlq $2, %rsi
leaq 24(%rsp), %rdi
movl $1, %edx
callq hipMallocManaged
movslq NJ(%rip), %rax
movslq NM(%rip), %rsi
imulq %rax, %rsi
shlq $2, %rsi
leaq 16(%rsp), %rdi
movl $1, %edx
callq hipMallocManaged
movslq NM(%rip), %rax
movslq NL(%rip), %rsi
imulq %rax, %rsi
shlq $2, %rsi
leaq 8(%rsp), %rdi
movl $1, %edx
callq hipMallocManaged
movslq NI(%rip), %rax
movslq NJ(%rip), %rsi
imulq %rax, %rsi
shlq $2, %rsi
leaq 56(%rsp), %rdi
movl $1, %edx
callq hipMallocManaged
movslq NJ(%rip), %rax
movslq NL(%rip), %rsi
imulq %rax, %rsi
shlq $2, %rsi
leaq 48(%rsp), %rdi
movl $1, %edx
callq hipMallocManaged
movslq NI(%rip), %rax
movslq NL(%rip), %rsi
imulq %rax, %rsi
shlq $2, %rsi
leaq 40(%rsp), %rdi
movl $1, %edx
callq hipMallocManaged
# init_array(A, B, C, D)
movq 32(%rsp), %rdi
movq 24(%rsp), %rsi
movq 16(%rsp), %rdx
movq 8(%rsp), %rcx
callq _Z10init_arrayPfS_S_S_
# mm3Cuda(A..G, NI, NJ, NK, NL, NM): 6 regs + 6 stack args
movq 32(%rsp), %rdi
movq 24(%rsp), %rsi
movq 16(%rsp), %rdx
movq 8(%rsp), %rcx
movq 56(%rsp), %r8
movq 48(%rsp), %r9
movl NI(%rip), %eax
movl NJ(%rip), %r10d
movl NK(%rip), %r11d
movl NL(%rip), %r14d
movl NM(%rip), %r15d
pushq %r15
.cfi_adjust_cfa_offset 8
pushq %r14
.cfi_adjust_cfa_offset 8
pushq %r11
.cfi_adjust_cfa_offset 8
pushq %r10
.cfi_adjust_cfa_offset 8
pushq %rax
.cfi_adjust_cfa_offset 8
pushq 80(%rsp)
.cfi_adjust_cfa_offset 8
callq _Z7mm3CudaPfS_S_S_S_S_S_iiiii
addq $48, %rsp
.cfi_adjust_cfa_offset -48
# release the seven buffers
movq 32(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq 56(%rsp), %rdi
callq hipFree
movq 48(%rsp), %rdi
callq hipFree
movq 40(%rsp), %rdi
callq hipFree
.LBB5_3:
movl %ebx, %eax
addq $64, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end5:
.size main, .Lfunc_end5-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
# Module constructor emitted by the HIP toolchain: registers the embedded fat
# binary once (guarded by __hip_gpubin_handle) and then registers the three
# mm3 kernels with the runtime, finally scheduling __hip_module_dtor via atexit.
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
# Only register the fat binary if no handle has been cached yet.
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB6_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB6_2:
movq __hip_gpubin_handle(%rip), %rbx # rbx = cached gpubin handle, reused below
# Register kernel 1: host handle _Z11mm3_kernel1..., device name .L__unnamed_1.
# The 32 bytes zeroed at (%rsp) are the four stack-passed arguments of
# __hipRegisterFunction (presumably unused dim/size out-params — not shown here).
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11mm3_kernel1PfS_S_iiiii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
# Register kernel 2 with the same argument layout.
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11mm3_kernel2PfS_S_iiiii, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
# Register kernel 3.
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11mm3_kernel3PfS_S_iiiii, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
# Arrange for the fat binary to be unregistered at program exit.
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end6:
.size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
# Module destructor (registered with atexit by __hip_module_ctor): if a fat
# binary was registered, unregister it and clear the cached handle so the
# operation is idempotent.
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB7_2 # nothing to do if never registered
# %bb.1:
pushq %rax # 8-byte push keeps the stack 16-byte aligned for the call
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB7_2:
retq
.Lfunc_end7:
.size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor
.cfi_endproc
# -- End function
.type NI,@object # @NI
.bss
.globl NI
.p2align 2, 0x0
NI:
.long 0 # 0x0
.size NI, 4
.type NJ,@object # @NJ
.globl NJ
.p2align 2, 0x0
NJ:
.long 0 # 0x0
.size NJ, 4
.type NK,@object # @NK
.globl NK
.p2align 2, 0x0
NK:
.long 0 # 0x0
.size NK, 4
.type NL,@object # @NL
.globl NL
.p2align 2, 0x0
NL:
.long 0 # 0x0
.size NL, 4
.type NM,@object # @NM
.globl NM
.p2align 2, 0x0
NM:
.long 0 # 0x0
.size NM, 4
.type _Z11mm3_kernel1PfS_S_iiiii,@object # @_Z11mm3_kernel1PfS_S_iiiii
.section .rodata,"a",@progbits
.globl _Z11mm3_kernel1PfS_S_iiiii
.p2align 3, 0x0
_Z11mm3_kernel1PfS_S_iiiii:
.quad _Z26__device_stub__mm3_kernel1PfS_S_iiiii
.size _Z11mm3_kernel1PfS_S_iiiii, 8
.type _Z11mm3_kernel2PfS_S_iiiii,@object # @_Z11mm3_kernel2PfS_S_iiiii
.globl _Z11mm3_kernel2PfS_S_iiiii
.p2align 3, 0x0
_Z11mm3_kernel2PfS_S_iiiii:
.quad _Z26__device_stub__mm3_kernel2PfS_S_iiiii
.size _Z11mm3_kernel2PfS_S_iiiii, 8
.type _Z11mm3_kernel3PfS_S_iiiii,@object # @_Z11mm3_kernel3PfS_S_iiiii
.globl _Z11mm3_kernel3PfS_S_iiiii
.p2align 3, 0x0
_Z11mm3_kernel3PfS_S_iiiii:
.quad _Z26__device_stub__mm3_kernel3PfS_S_iiiii
.size _Z11mm3_kernel3PfS_S_iiiii, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%0.6lf\n"
.size .L.str, 8
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z11mm3_kernel1PfS_S_iiiii"
.size .L__unnamed_1, 27
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z11mm3_kernel2PfS_S_iiiii"
.size .L__unnamed_2, 27
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z11mm3_kernel3PfS_S_iiiii"
.size .L__unnamed_3, 27
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "please no troll"
.size .Lstr, 16
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z26__device_stub__mm3_kernel1PfS_S_iiiii
.addrsig_sym _Z26__device_stub__mm3_kernel2PfS_S_iiiii
.addrsig_sym _Z26__device_stub__mm3_kernel3PfS_S_iiiii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z11mm3_kernel1PfS_S_iiiii
.addrsig_sym _Z11mm3_kernel2PfS_S_iiiii
.addrsig_sym _Z11mm3_kernel3PfS_S_iiiii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
// One thread per nonzero entry: writes the absolute per-row prediction error
// into mat_err and atomically accumulates a learning-rate-scaled gradient
// contribution into w[c].
// val/row_ind/col_ind hold nnz (value,row,col) triplets — presumably a
// COO-format sparse matrix; confirm against the caller.
__global__ void grad(float * val, int * row_ind, int *col_ind, float * mat_err, int nnz, float *act, float *label, float *w, float learning_rate) {
// Flat 1D global thread index.
const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
// Bounds guard: the grid rarely divides nnz evenly.
if (tid < nnz) {
int r = row_ind[tid];
int c = col_ind[tid];
float v = val[tid];
// NOTE(review): abs() on a float relies on a float overload being in scope
// via includes.h; fabsf() would make the intent explicit — confirm.
mat_err[tid] = abs(label[r] - act[r]);
// NOTE(review): label[r] - act[r] is recomputed here (two extra global
// loads) rather than reused from the line above.
float err = v * (label[r] - act[r]);
// Multiple entries can share a column, so the update to w[c] must be atomic.
atomicAdd(&w[c], learning_rate * err);
}
} | code for sm_80
Function : _Z4gradPfPiS0_S_iS_S_S_f
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R12, SR_CTAID.X ; /* 0x00000000000c7919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R12, R12, c[0x0][0x0], R3 ; /* 0x000000000c0c7a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R12, c[0x0][0x180], PT ; /* 0x000060000c007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R15, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff0f7435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R2, R12, R15, c[0x0][0x168] ; /* 0x00005a000c027625 */
/* 0x000fcc00078e020f */
/*0090*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea4000c1e1900 */
/*00a0*/ IMAD.WIDE R10, R2, R15, c[0x0][0x188] ; /* 0x00006200020a7625 */
/* 0x004fc800078e020f */
/*00b0*/ IMAD.WIDE R8, R2, R15.reuse, c[0x0][0x190] ; /* 0x0000640002087625 */
/* 0x080fe200078e020f */
/*00c0*/ LDG.E R0, [R10.64] ; /* 0x000000040a007981 */
/* 0x000ea8000c1e1900 */
/*00d0*/ LDG.E R5, [R8.64] ; /* 0x0000000408057981 */
/* 0x000ea2000c1e1900 */
/*00e0*/ IMAD.WIDE R6, R12, R15, c[0x0][0x160] ; /* 0x000058000c067625 */
/* 0x000fcc00078e020f */
/*00f0*/ LDG.E R6, [R6.64] ; /* 0x0000000406067981 */
/* 0x000ee2000c1e1900 */
/*0100*/ FADD R0, -R0, R5 ; /* 0x0000000500007221 */
/* 0x004fe40000000100 */
/*0110*/ IMAD.WIDE R4, R12, R15, c[0x0][0x170] ; /* 0x00005c000c047625 */
/* 0x000fc800078e020f */
/*0120*/ IMAD.WIDE R12, R12, R15, c[0x0][0x178] ; /* 0x00005e000c0c7625 */
/* 0x000fe400078e020f */
/*0130*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea4000c1e1900 */
/*0140*/ FADD R17, |R0|, -RZ ; /* 0x800000ff00117221 */
/* 0x000fca0000000200 */
/*0150*/ STG.E [R12.64], R17 ; /* 0x000000110c007986 */
/* 0x000fe8000c101904 */
/*0160*/ LDG.E R0, [R10.64] ; /* 0x000000040a007981 */
/* 0x000f28000c1e1900 */
/*0170*/ LDG.E R3, [R8.64] ; /* 0x0000000408037981 */
/* 0x000f24000c1e1900 */
/*0180*/ FADD R3, -R0, R3 ; /* 0x0000000300037221 */
/* 0x010fc80000000100 */
/*0190*/ FMUL R0, R6, R3 ; /* 0x0000000306007220 */
/* 0x008fe40000400000 */
/*01a0*/ IMAD.WIDE R2, R4, R15, c[0x0][0x198] ; /* 0x0000660004027625 */
/* 0x004fc800078e020f */
/*01b0*/ FMUL R15, R0, c[0x0][0x1a0] ; /* 0x00006800000f7a20 */
/* 0x000fca0000400000 */
/*01c0*/ RED.E.ADD.F32.FTZ.RN.STRONG.GPU [R2.64], R15 ; /* 0x0000000f0200798e */
/* 0x000fe2000c10e784 */
/*01d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01e0*/ BRA 0x1e0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
// One thread per nonzero (value,row,col) triplet (presumably COO — confirm
// with the caller): stores |label[r] - act[r]| into mat_err[tid] and
// atomically adds learning_rate * val[tid] * (label[r] - act[r]) into w[c].
__global__ void grad(float * val, int * row_ind, int *col_ind, float * mat_err, int nnz, float *act, float *label, float *w, float learning_rate) {
// Flat 1D global thread index with the usual tail guard below.
const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (tid < nnz) {
int r = row_ind[tid];
int c = col_ind[tid];
float v = val[tid];
// NOTE(review): abs() on a float depends on a float overload from includes.h;
// fabsf() would be explicit. The difference is also re-read below.
mat_err[tid] = abs(label[r] - act[r]);
float err = v * (label[r] - act[r]);
// Atomic: several entries may target the same column c.
atomicAdd(&w[c], learning_rate * err);
}
} | .file "tmpxft_001add57_00000000-6_grad.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
# atexit hook: unregisters the CUDA fat binary whose handle was cached in
# _ZL20__cudaFatCubinHandle by the static initializer.
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp # keep the stack 16-byte aligned for the call
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z38__device_stub__Z4gradPfPiS0_S_iS_S_S_fPfPiS0_S_iS_S_S_f
.type _Z38__device_stub__Z4gradPfPiS0_S_iS_S_S_fPfPiS0_S_iS_S_S_f, @function
# Device stub for the grad kernel: spills the nine register/stack arguments
# into the local frame, builds the kernel-argument pointer array expected by
# cudaLaunchKernel, pops the launch configuration pushed at the <<<...>>> call
# site, and launches. Guarded by a stack canary (%fs:40).
_Z38__device_stub__Z4gradPfPiS0_S_iS_S_S_fPfPiS0_S_iS_S_S_f:
.LFB2051:
.cfi_startproc
endbr64
subq $216, %rsp
.cfi_def_cfa_offset 224
# Spill the six integer-register args, the float arg (xmm0) and the two
# stack-passed args into fixed slots so their addresses can be taken.
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
movq %rdx, 40(%rsp)
movq %rcx, 32(%rsp)
movl %r8d, 28(%rsp)
movq %r9, 16(%rsp)
movss %xmm0, 24(%rsp)
movq 224(%rsp), %rax
movq %rax, 8(%rsp)
movq 232(%rsp), %rax
movq %rax, (%rsp)
# Install the stack-protector canary.
movq %fs:40, %rax
movq %rax, 200(%rsp)
xorl %eax, %eax
# Kernel-argument array: nine pointers at 128..192(%rsp), one per grad parameter.
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 40(%rsp), %rax
movq %rax, 144(%rsp)
leaq 32(%rsp), %rax
movq %rax, 152(%rsp)
leaq 28(%rsp), %rax
movq %rax, 160(%rsp)
leaq 16(%rsp), %rax
movq %rax, 168(%rsp)
leaq 8(%rsp), %rax
movq %rax, 176(%rsp)
movq %rsp, %rax
movq %rax, 184(%rsp)
leaq 24(%rsp), %rax
movq %rax, 192(%rsp)
# Default-initialize the two dim3 structs at 80(%rsp)/92(%rsp) to {1,1,1}.
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
# Pop the launch configuration recorded by the <<<...>>> call site into
# gridDim (80), blockDim (92), sharedMem (64) and stream (72).
leaq 72(%rsp), %rcx
leaq 64(%rsp), %rdx
leaq 92(%rsp), %rsi
leaq 80(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7 # 0 => a configuration was available; perform the launch
.L3:
# Verify the canary, then unwind.
movq 200(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $216, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
# Push stream and sharedMem (offsets shifted by the pushes) and call
# cudaLaunchKernel(func, gridDim, blockDim, args, sharedMem, stream).
pushq 72(%rsp)
.cfi_def_cfa_offset 232
pushq 72(%rsp)
.cfi_def_cfa_offset 240
leaq 144(%rsp), %r9
movq 108(%rsp), %rcx
movl 116(%rsp), %r8d
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
leaq _Z4gradPfPiS0_S_iS_S_S_f(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 224
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z38__device_stub__Z4gradPfPiS0_S_iS_S_S_fPfPiS0_S_iS_S_S_f, .-_Z38__device_stub__Z4gradPfPiS0_S_iS_S_S_fPfPiS0_S_iS_S_S_f
.globl _Z4gradPfPiS0_S_iS_S_S_f
.type _Z4gradPfPiS0_S_iS_S_S_f, @function
# Host-side entry point for the grad kernel: forwards its two stack-passed
# arguments (re-pushed in original order) and calls the device stub, which
# performs the actual cudaLaunchKernel.
_Z4gradPfPiS0_S_iS_S_S_f:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp # realign; caller's stack args now at 16/24(%rsp)
.cfi_def_cfa_offset 16
pushq 24(%rsp) # second stack arg
.cfi_def_cfa_offset 24
pushq 24(%rsp) # first stack arg (offset shifted by the previous push)
.cfi_def_cfa_offset 32
call _Z38__device_stub__Z4gradPfPiS0_S_iS_S_S_fPfPiS0_S_iS_S_S_f
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z4gradPfPiS0_S_iS_S_S_f, .-_Z4gradPfPiS0_S_iS_S_S_f
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z4gradPfPiS0_S_iS_S_S_f"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
# Static initializer (placed in .init_array below): registers the embedded fat
# binary, registers the grad kernel under its mangled name, finalizes
# registration, and schedules the unregister hook via atexit.
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip) # cache handle for the atexit hook
# Zero the four stack-passed arguments of __cudaRegisterFunction.
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx # device-side name string "_Z4gradPfPiS0_S_iS_S_S_f"
movq %rdx, %rcx
leaq _Z4gradPfPiS0_S_iS_S_S_f(%rip), %rsi # host-side function handle
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
// Per-nonzero gradient kernel: thread tid handles triplet
// (val[tid], row_ind[tid], col_ind[tid]) — presumably COO sparse format,
// confirm with the caller. Writes |label[r]-act[r]| to mat_err[tid] and
// atomically adds learning_rate * v * (label[r]-act[r]) into w[c].
__global__ void grad(float * val, int * row_ind, int *col_ind, float * mat_err, int nnz, float *act, float *label, float *w, float learning_rate) {
// 1D global thread id; guarded because the grid may overshoot nnz.
const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (tid < nnz) {
int r = row_ind[tid];
int c = col_ind[tid];
float v = val[tid];
// NOTE(review): abs() on a float relies on an overload from includes.h;
// fabsf() would be explicit — confirm.
mat_err[tid] = abs(label[r] - act[r]);
// The difference is recomputed (re-read from global memory) here.
float err = v * (label[r] - act[r]);
// w[c] may be updated by many threads, hence the atomic.
atomicAdd(&w[c], learning_rate * err);
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void grad(float * val, int * row_ind, int *col_ind, float * mat_err, int nnz, float *act, float *label, float *w, float learning_rate) {
const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (tid < nnz) {
int r = row_ind[tid];
int c = col_ind[tid];
float v = val[tid];
mat_err[tid] = abs(label[r] - act[r]);
float err = v * (label[r] - act[r]);
atomicAdd(&w[c], learning_rate * err);
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
// HIP grad kernel: one thread per nonzero (value,row,col) triplet — presumably
// COO sparse format, confirm with the caller. Writes the absolute per-row
// error to mat_err and atomically accumulates the scaled gradient into w[c].
__global__ void grad(float * val, int * row_ind, int *col_ind, float * mat_err, int nnz, float *act, float *label, float *w, float learning_rate) {
// Flat 1D global thread id with the usual bounds guard.
const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (tid < nnz) {
int r = row_ind[tid];
int c = col_ind[tid];
float v = val[tid];
// NOTE(review): float abs() relies on an overload from includes.h; the
// difference label[r]-act[r] is also re-read below instead of reused.
mat_err[tid] = abs(label[r] - act[r]);
float err = v * (label[r] - act[r]);
// w[c] is shared across threads, hence the atomic update.
atomicAdd(&w[c], learning_rate * err);
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z4gradPfPiS0_S_iS_S_S_f
.globl _Z4gradPfPiS0_S_iS_S_S_f
.p2align 8
.type _Z4gradPfPiS0_S_iS_S_S_f,@function
_Z4gradPfPiS0_S_iS_S_S_f:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x54
s_load_b32 s3, s[0:1], 0x20
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_3
s_load_b256 s[4:11], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_clause 0x2
s_load_b128 s[12:15], s[0:1], 0x28
s_load_b64 s[2:3], s[0:1], 0x38
s_load_b32 s0, s[0:1], 0x40
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v3, vcc_lo, s7, v1, vcc_lo
global_load_b32 v2, v[2:3], off
s_waitcnt vmcnt(0)
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[2:3]
v_add_co_u32 v4, vcc_lo, s14, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v5, vcc_lo, s15, v3, vcc_lo
v_add_co_u32 v2, vcc_lo, s12, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s13, v3, vcc_lo
v_add_co_u32 v6, vcc_lo, s8, v0
global_load_b32 v9, v[4:5], off
global_load_b32 v10, v[2:3], off
v_add_co_ci_u32_e32 v7, vcc_lo, s9, v1, vcc_lo
global_load_b32 v6, v[6:7], off
v_add_co_u32 v7, vcc_lo, s4, v0
v_add_co_ci_u32_e32 v8, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s10, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s11, v1, vcc_lo
global_load_b32 v8, v[7:8], off
s_waitcnt vmcnt(2)
v_sub_f32_e32 v9, v9, v10
s_delay_alu instid0(VALU_DEP_1)
v_and_b32_e32 v9, 0x7fffffff, v9
s_waitcnt vmcnt(1)
v_ashrrev_i32_e32 v7, 31, v6
global_store_b32 v[0:1], v9, off
global_load_b32 v4, v[4:5], off
global_load_b32 v2, v[2:3], off
v_lshlrev_b64 v[0:1], 2, v[6:7]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
global_load_b32 v3, v[0:1], off
s_waitcnt vmcnt(1)
v_sub_f32_e32 v2, v4, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f32_e32 v2, v8, v2
v_mul_f32_e32 v4, s0, v2
s_mov_b32 s0, 0
.LBB0_2:
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1)
v_add_f32_e32 v2, v3, v4
global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
s_waitcnt vmcnt(0)
v_cmp_eq_u32_e32 vcc_lo, v2, v3
v_mov_b32_e32 v3, v2
s_or_b32 s0, vcc_lo, s0
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s0
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z4gradPfPiS0_S_iS_S_S_f
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 328
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 11
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z4gradPfPiS0_S_iS_S_S_f, .Lfunc_end0-_Z4gradPfPiS0_S_iS_S_S_f
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 48
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 56
.size: 8
.value_kind: global_buffer
- .offset: 64
.size: 4
.value_kind: by_value
- .offset: 72
.size: 4
.value_kind: hidden_block_count_x
- .offset: 76
.size: 4
.value_kind: hidden_block_count_y
- .offset: 80
.size: 4
.value_kind: hidden_block_count_z
- .offset: 84
.size: 2
.value_kind: hidden_group_size_x
- .offset: 86
.size: 2
.value_kind: hidden_group_size_y
- .offset: 88
.size: 2
.value_kind: hidden_group_size_z
- .offset: 90
.size: 2
.value_kind: hidden_remainder_x
- .offset: 92
.size: 2
.value_kind: hidden_remainder_y
- .offset: 94
.size: 2
.value_kind: hidden_remainder_z
- .offset: 112
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 120
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 128
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 136
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 328
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z4gradPfPiS0_S_iS_S_S_f
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z4gradPfPiS0_S_iS_S_S_f.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 11
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void grad(float * val, int * row_ind, int *col_ind, float * mat_err, int nnz, float *act, float *label, float *w, float learning_rate) {
const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (tid < nnz) {
int r = row_ind[tid];
int c = col_ind[tid];
float v = val[tid];
mat_err[tid] = abs(label[r] - act[r]);
float err = v * (label[r] - act[r]);
atomicAdd(&w[c], learning_rate * err);
}
} | .text
.file "grad.hip"
.globl _Z19__device_stub__gradPfPiS0_S_iS_S_S_f # -- Begin function _Z19__device_stub__gradPfPiS0_S_iS_S_S_f
.p2align 4, 0x90
.type _Z19__device_stub__gradPfPiS0_S_iS_S_S_f,@function
# HIP device stub for grad: spills the register-passed arguments, builds the
# nine-entry kernel-argument pointer array at 96..160(%rsp) (the last two
# entries point at the caller's stack-passed args at 176/184), pops the launch
# configuration and calls hipLaunchKernel with the host handle as the key.
_Z19__device_stub__gradPfPiS0_S_iS_S_S_f: # @_Z19__device_stub__gradPfPiS0_S_iS_S_S_f
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
# Spill register args so their addresses can be taken.
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 4(%rsp)
movq %r9, 56(%rsp)
movss %xmm0, (%rsp)
# Argument pointer array, one entry per grad parameter, in declaration order.
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
leaq 56(%rsp), %rax
movq %rax, 136(%rsp)
leaq 176(%rsp), %rax # caller-frame stack arg
movq %rax, 144(%rsp)
leaq 184(%rsp), %rax # caller-frame stack arg
movq %rax, 152(%rsp)
movq %rsp, %rax
movq %rax, 160(%rsp)
# Pop the launch configuration pushed at the <<<...>>> call site.
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 96(%rsp), %r9 # args array
movl $_Z4gradPfPiS0_S_iS_S_S_f, %edi # host-side function handle
pushq 8(%rsp) # stream / sharedMem forwarded on the stack
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end0:
.size _Z19__device_stub__gradPfPiS0_S_iS_S_S_f, .Lfunc_end0-_Z19__device_stub__gradPfPiS0_S_iS_S_S_f
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
# Module constructor for the grad HIP module: registers the embedded fat
# binary once (guarded by __hip_gpubin_handle), registers the grad kernel,
# and tail-calls atexit to schedule __hip_module_dtor.
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
# Skip registration if a handle is already cached.
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
# Zero the four stack-passed arguments of __hipRegisterFunction.
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4gradPfPiS0_S_iS_S_S_f, %esi # host handle
movl $.L__unnamed_1, %edx # device name string
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
# atexit hook: unregisters the fat binary if one was registered and clears
# the cached handle (idempotent).
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2 # nothing registered
# %bb.1:
pushq %rax # keep 16-byte stack alignment for the call
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z4gradPfPiS0_S_iS_S_S_f,@object # @_Z4gradPfPiS0_S_iS_S_S_f
.section .rodata,"a",@progbits
.globl _Z4gradPfPiS0_S_iS_S_S_f
.p2align 3, 0x0
_Z4gradPfPiS0_S_iS_S_S_f:
.quad _Z19__device_stub__gradPfPiS0_S_iS_S_S_f
.size _Z4gradPfPiS0_S_iS_S_S_f, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z4gradPfPiS0_S_iS_S_S_f"
.size .L__unnamed_1, 25
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z19__device_stub__gradPfPiS0_S_iS_S_S_f
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z4gradPfPiS0_S_iS_S_S_f
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z4gradPfPiS0_S_iS_S_S_f
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R12, SR_CTAID.X ; /* 0x00000000000c7919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R12, R12, c[0x0][0x0], R3 ; /* 0x000000000c0c7a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R12, c[0x0][0x180], PT ; /* 0x000060000c007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R15, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff0f7435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R2, R12, R15, c[0x0][0x168] ; /* 0x00005a000c027625 */
/* 0x000fcc00078e020f */
/*0090*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea4000c1e1900 */
/*00a0*/ IMAD.WIDE R10, R2, R15, c[0x0][0x188] ; /* 0x00006200020a7625 */
/* 0x004fc800078e020f */
/*00b0*/ IMAD.WIDE R8, R2, R15.reuse, c[0x0][0x190] ; /* 0x0000640002087625 */
/* 0x080fe200078e020f */
/*00c0*/ LDG.E R0, [R10.64] ; /* 0x000000040a007981 */
/* 0x000ea8000c1e1900 */
/*00d0*/ LDG.E R5, [R8.64] ; /* 0x0000000408057981 */
/* 0x000ea2000c1e1900 */
/*00e0*/ IMAD.WIDE R6, R12, R15, c[0x0][0x160] ; /* 0x000058000c067625 */
/* 0x000fcc00078e020f */
/*00f0*/ LDG.E R6, [R6.64] ; /* 0x0000000406067981 */
/* 0x000ee2000c1e1900 */
/*0100*/ FADD R0, -R0, R5 ; /* 0x0000000500007221 */
/* 0x004fe40000000100 */
/*0110*/ IMAD.WIDE R4, R12, R15, c[0x0][0x170] ; /* 0x00005c000c047625 */
/* 0x000fc800078e020f */
/*0120*/ IMAD.WIDE R12, R12, R15, c[0x0][0x178] ; /* 0x00005e000c0c7625 */
/* 0x000fe400078e020f */
/*0130*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea4000c1e1900 */
/*0140*/ FADD R17, |R0|, -RZ ; /* 0x800000ff00117221 */
/* 0x000fca0000000200 */
/*0150*/ STG.E [R12.64], R17 ; /* 0x000000110c007986 */
/* 0x000fe8000c101904 */
/*0160*/ LDG.E R0, [R10.64] ; /* 0x000000040a007981 */
/* 0x000f28000c1e1900 */
/*0170*/ LDG.E R3, [R8.64] ; /* 0x0000000408037981 */
/* 0x000f24000c1e1900 */
/*0180*/ FADD R3, -R0, R3 ; /* 0x0000000300037221 */
/* 0x010fc80000000100 */
/*0190*/ FMUL R0, R6, R3 ; /* 0x0000000306007220 */
/* 0x008fe40000400000 */
/*01a0*/ IMAD.WIDE R2, R4, R15, c[0x0][0x198] ; /* 0x0000660004027625 */
/* 0x004fc800078e020f */
/*01b0*/ FMUL R15, R0, c[0x0][0x1a0] ; /* 0x00006800000f7a20 */
/* 0x000fca0000400000 */
/*01c0*/ RED.E.ADD.F32.FTZ.RN.STRONG.GPU [R2.64], R15 ; /* 0x0000000f0200798e */
/* 0x000fe2000c10e784 */
/*01d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01e0*/ BRA 0x1e0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z4gradPfPiS0_S_iS_S_S_f
.globl _Z4gradPfPiS0_S_iS_S_S_f
.p2align 8
.type _Z4gradPfPiS0_S_iS_S_S_f,@function
_Z4gradPfPiS0_S_iS_S_S_f:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x54
s_load_b32 s3, s[0:1], 0x20
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_3
s_load_b256 s[4:11], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_clause 0x2
s_load_b128 s[12:15], s[0:1], 0x28
s_load_b64 s[2:3], s[0:1], 0x38
s_load_b32 s0, s[0:1], 0x40
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v3, vcc_lo, s7, v1, vcc_lo
global_load_b32 v2, v[2:3], off
s_waitcnt vmcnt(0)
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[2:3]
v_add_co_u32 v4, vcc_lo, s14, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v5, vcc_lo, s15, v3, vcc_lo
v_add_co_u32 v2, vcc_lo, s12, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s13, v3, vcc_lo
v_add_co_u32 v6, vcc_lo, s8, v0
global_load_b32 v9, v[4:5], off
global_load_b32 v10, v[2:3], off
v_add_co_ci_u32_e32 v7, vcc_lo, s9, v1, vcc_lo
global_load_b32 v6, v[6:7], off
v_add_co_u32 v7, vcc_lo, s4, v0
v_add_co_ci_u32_e32 v8, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s10, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s11, v1, vcc_lo
global_load_b32 v8, v[7:8], off
s_waitcnt vmcnt(2)
v_sub_f32_e32 v9, v9, v10
s_delay_alu instid0(VALU_DEP_1)
v_and_b32_e32 v9, 0x7fffffff, v9
s_waitcnt vmcnt(1)
v_ashrrev_i32_e32 v7, 31, v6
global_store_b32 v[0:1], v9, off
global_load_b32 v4, v[4:5], off
global_load_b32 v2, v[2:3], off
v_lshlrev_b64 v[0:1], 2, v[6:7]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
global_load_b32 v3, v[0:1], off
s_waitcnt vmcnt(1)
v_sub_f32_e32 v2, v4, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f32_e32 v2, v8, v2
v_mul_f32_e32 v4, s0, v2
s_mov_b32 s0, 0
.LBB0_2:
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1)
v_add_f32_e32 v2, v3, v4
global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
s_waitcnt vmcnt(0)
v_cmp_eq_u32_e32 vcc_lo, v2, v3
v_mov_b32_e32 v3, v2
s_or_b32 s0, vcc_lo, s0
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s0
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z4gradPfPiS0_S_iS_S_S_f
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 328
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 11
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z4gradPfPiS0_S_iS_S_S_f, .Lfunc_end0-_Z4gradPfPiS0_S_iS_S_S_f
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 48
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 56
.size: 8
.value_kind: global_buffer
- .offset: 64
.size: 4
.value_kind: by_value
- .offset: 72
.size: 4
.value_kind: hidden_block_count_x
- .offset: 76
.size: 4
.value_kind: hidden_block_count_y
- .offset: 80
.size: 4
.value_kind: hidden_block_count_z
- .offset: 84
.size: 2
.value_kind: hidden_group_size_x
- .offset: 86
.size: 2
.value_kind: hidden_group_size_y
- .offset: 88
.size: 2
.value_kind: hidden_group_size_z
- .offset: 90
.size: 2
.value_kind: hidden_remainder_x
- .offset: 92
.size: 2
.value_kind: hidden_remainder_y
- .offset: 94
.size: 2
.value_kind: hidden_remainder_z
- .offset: 112
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 120
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 128
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 136
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 328
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z4gradPfPiS0_S_iS_S_S_f
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z4gradPfPiS0_S_iS_S_S_f.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 11
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_001add57_00000000-6_grad.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z38__device_stub__Z4gradPfPiS0_S_iS_S_S_fPfPiS0_S_iS_S_S_f
.type _Z38__device_stub__Z4gradPfPiS0_S_iS_S_S_fPfPiS0_S_iS_S_S_f, @function
_Z38__device_stub__Z4gradPfPiS0_S_iS_S_S_fPfPiS0_S_iS_S_S_f:
.LFB2051:
.cfi_startproc
endbr64
subq $216, %rsp
.cfi_def_cfa_offset 224
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
movq %rdx, 40(%rsp)
movq %rcx, 32(%rsp)
movl %r8d, 28(%rsp)
movq %r9, 16(%rsp)
movss %xmm0, 24(%rsp)
movq 224(%rsp), %rax
movq %rax, 8(%rsp)
movq 232(%rsp), %rax
movq %rax, (%rsp)
movq %fs:40, %rax
movq %rax, 200(%rsp)
xorl %eax, %eax
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 40(%rsp), %rax
movq %rax, 144(%rsp)
leaq 32(%rsp), %rax
movq %rax, 152(%rsp)
leaq 28(%rsp), %rax
movq %rax, 160(%rsp)
leaq 16(%rsp), %rax
movq %rax, 168(%rsp)
leaq 8(%rsp), %rax
movq %rax, 176(%rsp)
movq %rsp, %rax
movq %rax, 184(%rsp)
leaq 24(%rsp), %rax
movq %rax, 192(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
leaq 72(%rsp), %rcx
leaq 64(%rsp), %rdx
leaq 92(%rsp), %rsi
leaq 80(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 200(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $216, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 72(%rsp)
.cfi_def_cfa_offset 232
pushq 72(%rsp)
.cfi_def_cfa_offset 240
leaq 144(%rsp), %r9
movq 108(%rsp), %rcx
movl 116(%rsp), %r8d
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
leaq _Z4gradPfPiS0_S_iS_S_S_f(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 224
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z38__device_stub__Z4gradPfPiS0_S_iS_S_S_fPfPiS0_S_iS_S_S_f, .-_Z38__device_stub__Z4gradPfPiS0_S_iS_S_S_fPfPiS0_S_iS_S_S_f
.globl _Z4gradPfPiS0_S_iS_S_S_f
.type _Z4gradPfPiS0_S_iS_S_S_f, @function
_Z4gradPfPiS0_S_iS_S_S_f:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
pushq 24(%rsp)
.cfi_def_cfa_offset 24
pushq 24(%rsp)
.cfi_def_cfa_offset 32
call _Z38__device_stub__Z4gradPfPiS0_S_iS_S_S_fPfPiS0_S_iS_S_S_f
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z4gradPfPiS0_S_iS_S_S_f, .-_Z4gradPfPiS0_S_iS_S_S_f
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z4gradPfPiS0_S_iS_S_S_f"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z4gradPfPiS0_S_iS_S_S_f(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "grad.hip"
.globl _Z19__device_stub__gradPfPiS0_S_iS_S_S_f # -- Begin function _Z19__device_stub__gradPfPiS0_S_iS_S_S_f
.p2align 4, 0x90
.type _Z19__device_stub__gradPfPiS0_S_iS_S_S_f,@function
_Z19__device_stub__gradPfPiS0_S_iS_S_S_f: # @_Z19__device_stub__gradPfPiS0_S_iS_S_S_f
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 4(%rsp)
movq %r9, 56(%rsp)
movss %xmm0, (%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
leaq 56(%rsp), %rax
movq %rax, 136(%rsp)
leaq 176(%rsp), %rax
movq %rax, 144(%rsp)
leaq 184(%rsp), %rax
movq %rax, 152(%rsp)
movq %rsp, %rax
movq %rax, 160(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z4gradPfPiS0_S_iS_S_S_f, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end0:
.size _Z19__device_stub__gradPfPiS0_S_iS_S_S_f, .Lfunc_end0-_Z19__device_stub__gradPfPiS0_S_iS_S_S_f
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4gradPfPiS0_S_iS_S_S_f, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z4gradPfPiS0_S_iS_S_S_f,@object # @_Z4gradPfPiS0_S_iS_S_S_f
.section .rodata,"a",@progbits
.globl _Z4gradPfPiS0_S_iS_S_S_f
.p2align 3, 0x0
_Z4gradPfPiS0_S_iS_S_S_f:
.quad _Z19__device_stub__gradPfPiS0_S_iS_S_S_f
.size _Z4gradPfPiS0_S_iS_S_S_f, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z4gradPfPiS0_S_iS_S_S_f"
.size .L__unnamed_1, 25
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z19__device_stub__gradPfPiS0_S_iS_S_S_f
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z4gradPfPiS0_S_iS_S_S_f
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void dense_add(size_t sz, float_t* src, float_t* dest)
{
size_t srcIndex = threadIdx.x;
size_t destIndex = blockIdx.x*blockDim.x + threadIdx.x;
if(destIndex < sz)
{
dest[destIndex] += src[srcIndex];
}
} | code for sm_80
Function : _Z9dense_addmPfS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.U32.AND P0, PT, R0, c[0x0][0x160], PT ; /* 0x0000580000007a0c */
/* 0x000fc80003f06070 */
/*0050*/ ISETP.GE.U32.AND.EX P0, PT, RZ, c[0x0][0x164], PT, P0 ; /* 0x00005900ff007a0c */
/* 0x000fda0003f06100 */
/*0060*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0070*/ HFMA2.MMA R2, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff027435 */
/* 0x000fe200000001ff */
/*0080*/ LEA R4, P0, R0, c[0x0][0x170], 0x2 ; /* 0x00005c0000047a11 */
/* 0x000fe200078010ff */
/*0090*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*00a0*/ LEA.HI.X R5, R0, c[0x0][0x174], RZ, 0x2, P0 ; /* 0x00005d0000057a11 */
/* 0x000fca00000f14ff */
/*00b0*/ IMAD.WIDE.U32 R2, R3, R2, c[0x0][0x168] ; /* 0x00005a0003027625 */
/* 0x000fe200078e0002 */
/*00c0*/ LDG.E R0, [R4.64] ; /* 0x0000000404007981 */
/* 0x000eaa000c1e1900 */
/*00d0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea4000c1e1900 */
/*00e0*/ FADD R7, R0, R3 ; /* 0x0000000300077221 */
/* 0x004fca0000000000 */
/*00f0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*0100*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0110*/ BRA 0x110; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void dense_add(size_t sz, float_t* src, float_t* dest)
{
size_t srcIndex = threadIdx.x;
size_t destIndex = blockIdx.x*blockDim.x + threadIdx.x;
if(destIndex < sz)
{
dest[destIndex] += src[srcIndex];
}
} | .file "tmpxft_0016a87f_00000000-6_dense_add.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z31__device_stub__Z9dense_addmPfS_mPfS_
.type _Z31__device_stub__Z9dense_addmPfS_mPfS_, @function
_Z31__device_stub__Z9dense_addmPfS_mPfS_:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9dense_addmPfS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z31__device_stub__Z9dense_addmPfS_mPfS_, .-_Z31__device_stub__Z9dense_addmPfS_mPfS_
.globl _Z9dense_addmPfS_
.type _Z9dense_addmPfS_, @function
_Z9dense_addmPfS_:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z9dense_addmPfS_mPfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z9dense_addmPfS_, .-_Z9dense_addmPfS_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z9dense_addmPfS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z9dense_addmPfS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void dense_add(size_t sz, float_t* src, float_t* dest)
{
size_t srcIndex = threadIdx.x;
size_t destIndex = blockIdx.x*blockDim.x + threadIdx.x;
if(destIndex < sz)
{
dest[destIndex] += src[srcIndex];
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void dense_add(size_t sz, float_t* src, float_t* dest)
{
size_t srcIndex = threadIdx.x;
size_t destIndex = blockIdx.x*blockDim.x + threadIdx.x;
if(destIndex < sz)
{
dest[destIndex] += src[srcIndex];
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void dense_add(size_t sz, float_t* src, float_t* dest)
{
size_t srcIndex = threadIdx.x;
size_t destIndex = blockIdx.x*blockDim.x + threadIdx.x;
if(destIndex < sz)
{
dest[destIndex] += src[srcIndex];
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9dense_addmPfS_
.globl _Z9dense_addmPfS_
.p2align 8
.type _Z9dense_addmPfS_,@function
_Z9dense_addmPfS_:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x24
s_load_b64 s[2:3], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
v_mov_b32_e32 v2, 0
v_cmp_gt_u64_e32 vcc_lo, s[2:3], v[1:2]
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB0_2
s_load_b128 s[0:3], s[0:1], 0x8
v_lshlrev_b64 v[1:2], 2, v[1:2]
v_lshlrev_b32_e32 v3, 2, v0
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v0, vcc_lo, s2, v1
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v2, vcc_lo
global_load_b32 v2, v3, s[0:1]
global_load_b32 v3, v[0:1], off
s_waitcnt vmcnt(0)
v_add_f32_e32 v2, v2, v3
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9dense_addmPfS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9dense_addmPfS_, .Lfunc_end0-_Z9dense_addmPfS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 8
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9dense_addmPfS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9dense_addmPfS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void dense_add(size_t sz, float_t* src, float_t* dest)
{
size_t srcIndex = threadIdx.x;
size_t destIndex = blockIdx.x*blockDim.x + threadIdx.x;
if(destIndex < sz)
{
dest[destIndex] += src[srcIndex];
}
} | .text
.file "dense_add.hip"
.globl _Z24__device_stub__dense_addmPfS_ # -- Begin function _Z24__device_stub__dense_addmPfS_
.p2align 4, 0x90
.type _Z24__device_stub__dense_addmPfS_,@function
_Z24__device_stub__dense_addmPfS_: # @_Z24__device_stub__dense_addmPfS_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9dense_addmPfS_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z24__device_stub__dense_addmPfS_, .Lfunc_end0-_Z24__device_stub__dense_addmPfS_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9dense_addmPfS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9dense_addmPfS_,@object # @_Z9dense_addmPfS_
.section .rodata,"a",@progbits
.globl _Z9dense_addmPfS_
.p2align 3, 0x0
_Z9dense_addmPfS_:
.quad _Z24__device_stub__dense_addmPfS_
.size _Z9dense_addmPfS_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9dense_addmPfS_"
.size .L__unnamed_1, 18
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__dense_addmPfS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9dense_addmPfS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z9dense_addmPfS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.U32.AND P0, PT, R0, c[0x0][0x160], PT ; /* 0x0000580000007a0c */
/* 0x000fc80003f06070 */
/*0050*/ ISETP.GE.U32.AND.EX P0, PT, RZ, c[0x0][0x164], PT, P0 ; /* 0x00005900ff007a0c */
/* 0x000fda0003f06100 */
/*0060*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0070*/ HFMA2.MMA R2, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff027435 */
/* 0x000fe200000001ff */
/*0080*/ LEA R4, P0, R0, c[0x0][0x170], 0x2 ; /* 0x00005c0000047a11 */
/* 0x000fe200078010ff */
/*0090*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*00a0*/ LEA.HI.X R5, R0, c[0x0][0x174], RZ, 0x2, P0 ; /* 0x00005d0000057a11 */
/* 0x000fca00000f14ff */
/*00b0*/ IMAD.WIDE.U32 R2, R3, R2, c[0x0][0x168] ; /* 0x00005a0003027625 */
/* 0x000fe200078e0002 */
/*00c0*/ LDG.E R0, [R4.64] ; /* 0x0000000404007981 */
/* 0x000eaa000c1e1900 */
/*00d0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea4000c1e1900 */
/*00e0*/ FADD R7, R0, R3 ; /* 0x0000000300077221 */
/* 0x004fca0000000000 */
/*00f0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*0100*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0110*/ BRA 0x110; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9dense_addmPfS_
.globl _Z9dense_addmPfS_
.p2align 8
.type _Z9dense_addmPfS_,@function
_Z9dense_addmPfS_:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x24
s_load_b64 s[2:3], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
v_mov_b32_e32 v2, 0
v_cmp_gt_u64_e32 vcc_lo, s[2:3], v[1:2]
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB0_2
s_load_b128 s[0:3], s[0:1], 0x8
v_lshlrev_b64 v[1:2], 2, v[1:2]
v_lshlrev_b32_e32 v3, 2, v0
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v0, vcc_lo, s2, v1
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v2, vcc_lo
global_load_b32 v2, v3, s[0:1]
global_load_b32 v3, v[0:1], off
s_waitcnt vmcnt(0)
v_add_f32_e32 v2, v2, v3
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9dense_addmPfS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9dense_addmPfS_, .Lfunc_end0-_Z9dense_addmPfS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 8
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9dense_addmPfS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9dense_addmPfS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0016a87f_00000000-6_dense_add.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z31__device_stub__Z9dense_addmPfS_mPfS_
.type _Z31__device_stub__Z9dense_addmPfS_mPfS_, @function
_Z31__device_stub__Z9dense_addmPfS_mPfS_:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9dense_addmPfS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z31__device_stub__Z9dense_addmPfS_mPfS_, .-_Z31__device_stub__Z9dense_addmPfS_mPfS_
.globl _Z9dense_addmPfS_
.type _Z9dense_addmPfS_, @function
_Z9dense_addmPfS_:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z9dense_addmPfS_mPfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z9dense_addmPfS_, .-_Z9dense_addmPfS_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z9dense_addmPfS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z9dense_addmPfS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "dense_add.hip"
.globl _Z24__device_stub__dense_addmPfS_ # -- Begin function _Z24__device_stub__dense_addmPfS_
.p2align 4, 0x90
.type _Z24__device_stub__dense_addmPfS_,@function
_Z24__device_stub__dense_addmPfS_: # @_Z24__device_stub__dense_addmPfS_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9dense_addmPfS_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z24__device_stub__dense_addmPfS_, .Lfunc_end0-_Z24__device_stub__dense_addmPfS_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9dense_addmPfS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9dense_addmPfS_,@object # @_Z9dense_addmPfS_
.section .rodata,"a",@progbits
.globl _Z9dense_addmPfS_
.p2align 3, 0x0
_Z9dense_addmPfS_:
.quad _Z24__device_stub__dense_addmPfS_
.size _Z9dense_addmPfS_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9dense_addmPfS_"
.size .L__unnamed_1, 18
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__dense_addmPfS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9dense_addmPfS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void expKernel(float* Z, float* A, int size){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < size){
A[id] = __expf(-Z[id]);
}
} | code for sm_80
Function : _Z9expKernelPfS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R4, R4, c[0x0][0x0], R3 ; /* 0x0000000004047a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x170], PT ; /* 0x00005c0004007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R2, R4, R5, c[0x0][0x160] ; /* 0x0000580004027625 */
/* 0x000fcc00078e0205 */
/*0090*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1900 */
/*00a0*/ IMAD.WIDE R4, R4, R5, c[0x0][0x168] ; /* 0x00005a0004047625 */
/* 0x000fc800078e0205 */
/*00b0*/ FMUL R0, R2, -1.4426950216293334961 ; /* 0xbfb8aa3b02007820 */
/* 0x004fca0000400000 */
/*00c0*/ FSETP.GEU.AND P0, PT, R0, -126, PT ; /* 0xc2fc00000000780b */
/* 0x000fda0003f0e000 */
/*00d0*/ @!P0 FMUL R0, R0, 0.5 ; /* 0x3f00000000008820 */
/* 0x000fc80000400000 */
/*00e0*/ MUFU.EX2 R7, R0 ; /* 0x0000000000077308 */
/* 0x000e240000000800 */
/*00f0*/ @!P0 FMUL R7, R7, R7 ; /* 0x0000000707078220 */
/* 0x001fca0000400000 */
/*0100*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*0110*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0120*/ BRA 0x120; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void expKernel(float* Z, float* A, int size){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < size){
A[id] = __expf(-Z[id]);
}
} | .file "tmpxft_0016715a_00000000-6_expKernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z31__device_stub__Z9expKernelPfS_iPfS_i
.type _Z31__device_stub__Z9expKernelPfS_iPfS_i, @function
_Z31__device_stub__Z9expKernelPfS_iPfS_i:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9expKernelPfS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z31__device_stub__Z9expKernelPfS_iPfS_i, .-_Z31__device_stub__Z9expKernelPfS_iPfS_i
.globl _Z9expKernelPfS_i
.type _Z9expKernelPfS_i, @function
_Z9expKernelPfS_i:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z9expKernelPfS_iPfS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z9expKernelPfS_i, .-_Z9expKernelPfS_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z9expKernelPfS_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z9expKernelPfS_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void expKernel(float* Z, float* A, int size){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < size){
A[id] = __expf(-Z[id]);
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void expKernel(float* Z, float* A, int size){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < size){
A[id] = __expf(-Z[id]);
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void expKernel(float* Z, float* A, int size){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < size){
A[id] = __expf(-Z[id]);
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9expKernelPfS_i
.globl _Z9expKernelPfS_i
.p2align 8
.type _Z9expKernelPfS_i,@function
_Z9expKernelPfS_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[0:3], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
global_load_b32 v2, v[2:3], off
s_waitcnt vmcnt(0)
v_mul_f32_e32 v2, 0xbfb8aa3b, v2
s_delay_alu instid0(VALU_DEP_1)
v_exp_f32_e32 v2, v2
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9expKernelPfS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9expKernelPfS_i, .Lfunc_end0-_Z9expKernelPfS_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9expKernelPfS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9expKernelPfS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void expKernel(float* Z, float* A, int size){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < size){
A[id] = __expf(-Z[id]);
}
} | .text
.file "expKernel.hip"
.globl _Z24__device_stub__expKernelPfS_i # -- Begin function _Z24__device_stub__expKernelPfS_i
.p2align 4, 0x90
.type _Z24__device_stub__expKernelPfS_i,@function
_Z24__device_stub__expKernelPfS_i: # @_Z24__device_stub__expKernelPfS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9expKernelPfS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z24__device_stub__expKernelPfS_i, .Lfunc_end0-_Z24__device_stub__expKernelPfS_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9expKernelPfS_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9expKernelPfS_i,@object # @_Z9expKernelPfS_i
.section .rodata,"a",@progbits
.globl _Z9expKernelPfS_i
.p2align 3, 0x0
_Z9expKernelPfS_i:
.quad _Z24__device_stub__expKernelPfS_i
.size _Z9expKernelPfS_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9expKernelPfS_i"
.size .L__unnamed_1, 18
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__expKernelPfS_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9expKernelPfS_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z9expKernelPfS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R4, R4, c[0x0][0x0], R3 ; /* 0x0000000004047a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x170], PT ; /* 0x00005c0004007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R2, R4, R5, c[0x0][0x160] ; /* 0x0000580004027625 */
/* 0x000fcc00078e0205 */
/*0090*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1900 */
/*00a0*/ IMAD.WIDE R4, R4, R5, c[0x0][0x168] ; /* 0x00005a0004047625 */
/* 0x000fc800078e0205 */
/*00b0*/ FMUL R0, R2, -1.4426950216293334961 ; /* 0xbfb8aa3b02007820 */
/* 0x004fca0000400000 */
/*00c0*/ FSETP.GEU.AND P0, PT, R0, -126, PT ; /* 0xc2fc00000000780b */
/* 0x000fda0003f0e000 */
/*00d0*/ @!P0 FMUL R0, R0, 0.5 ; /* 0x3f00000000008820 */
/* 0x000fc80000400000 */
/*00e0*/ MUFU.EX2 R7, R0 ; /* 0x0000000000077308 */
/* 0x000e240000000800 */
/*00f0*/ @!P0 FMUL R7, R7, R7 ; /* 0x0000000707078220 */
/* 0x001fca0000400000 */
/*0100*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*0110*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0120*/ BRA 0x120; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9expKernelPfS_i
.globl _Z9expKernelPfS_i
.p2align 8
.type _Z9expKernelPfS_i,@function
_Z9expKernelPfS_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[0:3], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
global_load_b32 v2, v[2:3], off
s_waitcnt vmcnt(0)
v_mul_f32_e32 v2, 0xbfb8aa3b, v2
s_delay_alu instid0(VALU_DEP_1)
v_exp_f32_e32 v2, v2
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9expKernelPfS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9expKernelPfS_i, .Lfunc_end0-_Z9expKernelPfS_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9expKernelPfS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9expKernelPfS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0016715a_00000000-6_expKernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z31__device_stub__Z9expKernelPfS_iPfS_i
.type _Z31__device_stub__Z9expKernelPfS_iPfS_i, @function
_Z31__device_stub__Z9expKernelPfS_iPfS_i:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9expKernelPfS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z31__device_stub__Z9expKernelPfS_iPfS_i, .-_Z31__device_stub__Z9expKernelPfS_iPfS_i
.globl _Z9expKernelPfS_i
.type _Z9expKernelPfS_i, @function
_Z9expKernelPfS_i:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z9expKernelPfS_iPfS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z9expKernelPfS_i, .-_Z9expKernelPfS_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z9expKernelPfS_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z9expKernelPfS_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "expKernel.hip"
.globl _Z24__device_stub__expKernelPfS_i # -- Begin function _Z24__device_stub__expKernelPfS_i
.p2align 4, 0x90
.type _Z24__device_stub__expKernelPfS_i,@function
_Z24__device_stub__expKernelPfS_i: # @_Z24__device_stub__expKernelPfS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9expKernelPfS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z24__device_stub__expKernelPfS_i, .Lfunc_end0-_Z24__device_stub__expKernelPfS_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9expKernelPfS_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9expKernelPfS_i,@object # @_Z9expKernelPfS_i
.section .rodata,"a",@progbits
.globl _Z9expKernelPfS_i
.p2align 3, 0x0
_Z9expKernelPfS_i:
.quad _Z24__device_stub__expKernelPfS_i
.size _Z9expKernelPfS_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9expKernelPfS_i"
.size .L__unnamed_1, 18
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__expKernelPfS_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9expKernelPfS_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /**
* KERNEL cuAdd() - Takes 2 input arrays of same size N and adds them into C.
* Locations are found by computing the global index of each thread.
* @return
*/
__global__ void cuAdd(int *a,int *b,int *c, int N)
{
// global index
int offset = blockDim.x * blockIdx.x + threadIdx.x;
if(offset < N)
{
c[offset] = a[offset] + b[offset];
}
} | code for sm_80
Function : _Z5cuAddPiS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x178], PT ; /* 0x00005e0006007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x000fc800078e0207 */
/*0090*/ IMAD.WIDE R2, R6.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x0c0fe400078e0207 */
/*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0207 */
/*00d0*/ IADD3 R9, R4, R3, RZ ; /* 0x0000000304097210 */
/* 0x004fca0007ffe0ff */
/*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /**
* KERNEL cuAdd() - Takes 2 input arrays of same size N and adds them into C.
* Locations are found by computing the global index of each thread.
* @return
*/
__global__ void cuAdd(int *a,int *b,int *c, int N)
{
// global index
int offset = blockDim.x * blockIdx.x + threadIdx.x;
if(offset < N)
{
c[offset] = a[offset] + b[offset];
}
} | .file "tmpxft_00108ca4_00000000-6_d_VA.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z29__device_stub__Z5cuAddPiS_S_iPiS_S_i
.type _Z29__device_stub__Z5cuAddPiS_S_iPiS_S_i, @function
_Z29__device_stub__Z5cuAddPiS_S_iPiS_S_i:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z5cuAddPiS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z29__device_stub__Z5cuAddPiS_S_iPiS_S_i, .-_Z29__device_stub__Z5cuAddPiS_S_iPiS_S_i
.globl _Z5cuAddPiS_S_i
.type _Z5cuAddPiS_S_i, @function
_Z5cuAddPiS_S_i:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z29__device_stub__Z5cuAddPiS_S_iPiS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z5cuAddPiS_S_i, .-_Z5cuAddPiS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z5cuAddPiS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z5cuAddPiS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /**
* KERNEL cuAdd() - Takes 2 input arrays of same size N and adds them into C.
* Locations are found by computing the global index of each thread.
* @return
*/
__global__ void cuAdd(int *a,int *b,int *c, int N)
{
// global index
int offset = blockDim.x * blockIdx.x + threadIdx.x;
if(offset < N)
{
c[offset] = a[offset] + b[offset];
}
} | #include <hip/hip_runtime.h>
/**
* KERNEL cuAdd() - Takes 2 input arrays of same size N and adds them into C.
* Locations are found by computing the global index of each thread.
* @return
*/
__global__ void cuAdd(int *a,int *b,int *c, int N)
{
// global index
int offset = blockDim.x * blockIdx.x + threadIdx.x;
if(offset < N)
{
c[offset] = a[offset] + b[offset];
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
/**
* KERNEL cuAdd() - Takes 2 input arrays of same size N and adds them into C.
* Locations are found by computing the global index of each thread.
* @return
*/
__global__ void cuAdd(int *a,int *b,int *c, int N)
{
// global index
int offset = blockDim.x * blockIdx.x + threadIdx.x;
if(offset < N)
{
c[offset] = a[offset] + b[offset];
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z5cuAddPiS_S_i
.globl _Z5cuAddPiS_S_i
.p2align 8
.type _Z5cuAddPiS_S_i,@function
_Z5cuAddPiS_S_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z5cuAddPiS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z5cuAddPiS_S_i, .Lfunc_end0-_Z5cuAddPiS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z5cuAddPiS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z5cuAddPiS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
/**
* KERNEL cuAdd() - Takes 2 input arrays of same size N and adds them into C.
* Locations are found by computing the global index of each thread.
* @return
*/
__global__ void cuAdd(int *a,int *b,int *c, int N)
{
// global index
int offset = blockDim.x * blockIdx.x + threadIdx.x;
if(offset < N)
{
c[offset] = a[offset] + b[offset];
}
} | .text
.file "d_VA.hip"
.globl _Z20__device_stub__cuAddPiS_S_i # -- Begin function _Z20__device_stub__cuAddPiS_S_i
.p2align 4, 0x90
.type _Z20__device_stub__cuAddPiS_S_i,@function
_Z20__device_stub__cuAddPiS_S_i: # @_Z20__device_stub__cuAddPiS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z5cuAddPiS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z20__device_stub__cuAddPiS_S_i, .Lfunc_end0-_Z20__device_stub__cuAddPiS_S_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z5cuAddPiS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z5cuAddPiS_S_i,@object # @_Z5cuAddPiS_S_i
.section .rodata,"a",@progbits
.globl _Z5cuAddPiS_S_i
.p2align 3, 0x0
_Z5cuAddPiS_S_i:
.quad _Z20__device_stub__cuAddPiS_S_i
.size _Z5cuAddPiS_S_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z5cuAddPiS_S_i"
.size .L__unnamed_1, 16
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z20__device_stub__cuAddPiS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z5cuAddPiS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z5cuAddPiS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x178], PT ; /* 0x00005e0006007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x000fc800078e0207 */
/*0090*/ IMAD.WIDE R2, R6.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x0c0fe400078e0207 */
/*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0207 */
/*00d0*/ IADD3 R9, R4, R3, RZ ; /* 0x0000000304097210 */
/* 0x004fca0007ffe0ff */
/*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z5cuAddPiS_S_i
.globl _Z5cuAddPiS_S_i
.p2align 8
.type _Z5cuAddPiS_S_i,@function
_Z5cuAddPiS_S_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z5cuAddPiS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z5cuAddPiS_S_i, .Lfunc_end0-_Z5cuAddPiS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z5cuAddPiS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z5cuAddPiS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00108ca4_00000000-6_d_VA.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z29__device_stub__Z5cuAddPiS_S_iPiS_S_i
.type _Z29__device_stub__Z5cuAddPiS_S_iPiS_S_i, @function
_Z29__device_stub__Z5cuAddPiS_S_iPiS_S_i:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z5cuAddPiS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z29__device_stub__Z5cuAddPiS_S_iPiS_S_i, .-_Z29__device_stub__Z5cuAddPiS_S_iPiS_S_i
.globl _Z5cuAddPiS_S_i
.type _Z5cuAddPiS_S_i, @function
_Z5cuAddPiS_S_i:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z29__device_stub__Z5cuAddPiS_S_iPiS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z5cuAddPiS_S_i, .-_Z5cuAddPiS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z5cuAddPiS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z5cuAddPiS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "d_VA.hip"
.globl _Z20__device_stub__cuAddPiS_S_i # -- Begin function _Z20__device_stub__cuAddPiS_S_i
.p2align 4, 0x90
.type _Z20__device_stub__cuAddPiS_S_i,@function
_Z20__device_stub__cuAddPiS_S_i: # @_Z20__device_stub__cuAddPiS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z5cuAddPiS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z20__device_stub__cuAddPiS_S_i, .Lfunc_end0-_Z20__device_stub__cuAddPiS_S_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z5cuAddPiS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z5cuAddPiS_S_i,@object # @_Z5cuAddPiS_S_i
.section .rodata,"a",@progbits
.globl _Z5cuAddPiS_S_i
.p2align 3, 0x0
_Z5cuAddPiS_S_i:
.quad _Z20__device_stub__cuAddPiS_S_i
.size _Z5cuAddPiS_S_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z5cuAddPiS_S_i"
.size .L__unnamed_1, 16
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z20__device_stub__cuAddPiS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z5cuAddPiS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /*
* CS-4370-90: Par. Prog. Many-Core GPUs
* Nathan Dunn
* Professor Liu
* 10/4/19
* Project 1 - Basic Matrix Multiplication
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
// -------- EDIT THESE --------------
#define N 8 // size of the matrix
#define BLOCK 4 // size of thread block
/**
* Performs matrix multiplication on the GPU device
* dev_a - first matrix to be multiplied
* dev_b - second matrix to be multiplied
* dev_c - result of a * b is stored in this matrix
* size - size of the matrix (size * size)
*/
__global__ void MatrixMulKernel(int *dev_a, int *dev_b, int *dev_c, int size){
	// One thread computes one element of C = A * B.
	// Expected launch: 2-D grid of 2-D blocks covering at least size x size threads.
	const int row = blockDim.y * blockIdx.y + threadIdx.y;
	const int column = blockDim.x * blockIdx.x + threadIdx.x;

	// The grid may be padded past the matrix edge; out-of-range threads do nothing.
	if (row >= size || column >= size)
		return;

	// Dot product of row `row` of A with column `column` of B.
	int acc = 0;
	for (int k = 0; k < size; ++k)
		acc += dev_a[row * size + k] * dev_b[k * size + column];
	dev_c[row * size + column] = acc;
}
/**
* Performs matrix multiplication on the CPU
* a - first matrix to be multiplied
* b - second matrix to be multiplied
* c - result of a * b is stored in this matrix
* size - size of the matrix (size * size)
*/
// CPU reference implementation of C = A * B for size x size row-major
// int matrices. Used to validate the GPU result.
void MatrixMulOnHost(int *a, int *b, int *c, int size){
	for (int row = 0; row < size; ++row) {
		const int *aRow = a + row * size;  // start of row `row` in A
		int *cRow = c + row * size;        // start of row `row` in C
		for (int col = 0; col < size; ++col) {
			int acc = 0;
			for (int k = 0; k < size; ++k)
				acc += aRow[k] * b[k * size + col];
			cRow[col] = acc;
		}
	}
}
/**
Prints a matrix.
matrix - matrix to be printed
size - size of the matrix
*/
// Writes a size x size row-major matrix to stdout, one row per line,
// followed by a trailing blank line.
void printMatrix(int * matrix, int size){
	for (int row = 0; row < size; ++row) {
		const int *line = matrix + row * size;
		for (int col = 0; col < size; ++col)
			printf("%d ", line[col]);
		printf("\n");
	}
	printf("\n");
}
/**
Verifies that two matrices are equal.
a - first matrix to be compared
b - second matrix to be compared
size - size of the matrix
*/
// Compares two size x size row-major matrices element-wise and prints
// TEST PASSED!!! when identical, TEST FAILED!!! otherwise.
void verifyMult(int *a, int *b, int size){
	// The i*size+j traversal of the original is a flat scan of size*size cells.
	bool match = true;
	for (int idx = 0; match && idx < size * size; ++idx)
		match = (a[idx] == b[idx]);
	if (match)
		printf("TEST PASSED!!!\n");
	else
		printf("TEST FAILED!!!\n");
}
/**
 * Reports a CUDA runtime failure on stderr.
 * Returns true when `status` is cudaSuccess, false otherwise.
 */
static bool cudaOk(cudaError_t status, const char *what){
	if (status == cudaSuccess)
		return true;
	fprintf(stderr, "CUDA error during %s: %s\n", what, cudaGetErrorString(status));
	return false;
}

int main(void){
	// define block size and count (integer ceil-divide; no double round trip needed)
	int blockSize = BLOCK;
	int blockCount = (N + blockSize - 1) / blockSize;
	dim3 dimBlock(blockSize, blockSize, 1);
	dim3 dimGrid(blockCount, blockCount, 1);
	int *a, *b, *c, *d;
	int *dev_a, *dev_b, *dev_c;
	// allocate host memory for matrix A, B, C (CPU result), D (GPU result)
	a = (int*)malloc(sizeof(int)*N*N);
	b = (int*)malloc(sizeof(int)*N*N);
	c = (int*)malloc(sizeof(int)*N*N);
	d = (int*)malloc(sizeof(int)*N*N);
	if (a == NULL || b == NULL || c == NULL || d == NULL) {
		fprintf(stderr, "host allocation failed\n");
		return 1;
	}
	// initialize arrays a and b with a deterministic pseudo-random pattern
	int init = 1325;
	for(int i = 0; i < N; i++){
		for(int j = 0; j < N; j++){
			int index = i * N + j;
			init = 3125*init%65536;
			a[index] = (init-32768)/6553;
			b[index] = init%1000;
		}
	}
	// perform CPU matrix multiplication for gpu multiplication verification
	MatrixMulOnHost(a, b, c, N);
	printf("Matrix A:\n");
	printMatrix(a, N);
	printf("\nMatrix B:\n");
	printMatrix(b, N);
	printf("\nCPU Multiplication of A * B:\n");
	printMatrix(c, N);
	printf("Thread Block Count: %d\n", blockCount);
	printf("Starting GPU Computations\n\n");
	// allocate device memory (the original ignored every CUDA return code)
	if (!cudaOk(cudaMalloc((void **)(&dev_a), N*N*sizeof(int)), "cudaMalloc dev_a")) return 1;
	if (!cudaOk(cudaMalloc((void **)(&dev_b), N*N*sizeof(int)), "cudaMalloc dev_b")) return 1;
	if (!cudaOk(cudaMalloc((void **)(&dev_c), N*N*sizeof(int)), "cudaMalloc dev_c")) return 1;
	// copy array a,b (system memory) to dev_a, dev_b (device memory)
	if (!cudaOk(cudaMemcpy(dev_a,a,N*N*sizeof(int), cudaMemcpyHostToDevice), "copy a to dev_a")) return 1;
	if (!cudaOk(cudaMemcpy(dev_b,b,N*N*sizeof(int), cudaMemcpyHostToDevice), "copy b to dev_b")) return 1;
	// launch kernel; launch-configuration errors only surface via cudaGetLastError(),
	// execution errors via the following synchronize
	MatrixMulKernel<<<dimGrid, dimBlock>>>(dev_a, dev_b, dev_c, N);
	if (!cudaOk(cudaGetLastError(), "kernel launch")) return 1;
	if (!cudaOk(cudaDeviceSynchronize(), "kernel execution")) return 1;
	// copy results from GPU back to system memory; cudaMemcpy is blocking,
	// so the original's extra cudaDeviceSynchronize afterwards was redundant
	if (!cudaOk(cudaMemcpy(d, dev_c, N*N*sizeof(int), cudaMemcpyDeviceToHost), "copy dev_c to d")) return 1;
	printf("GPU Multiplication of A * B:\n");
	printMatrix(d, N);
	// verify that CPU and GPU multiplication match
	verifyMult(c, d, N);
	// free system and device memory
	free(a);
	free(b);
	free(c);
	free(d);
	cudaFree(dev_a);
	cudaFree(dev_b);
	cudaFree(dev_c);
	return 0;
} | code for sm_80
Function : _Z15MatrixMulKernelPiS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e280000002100 */
/*0030*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */
/* 0x000e680000002600 */
/*0040*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e620000002200 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0205 */
/*0060*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x178], PT ; /* 0x00005e0000007a0c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R3, R3, c[0x0][0x4], R2 ; /* 0x0000010003037a24 */
/* 0x002fca00078e0202 */
/*0080*/ ISETP.GE.OR P0, PT, R3, c[0x0][0x178], P0 ; /* 0x00005e0003007a0c */
/* 0x000fda0000706670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ MOV R2, c[0x0][0x178] ; /* 0x00005e0000027a02 */
/* 0x000fe20000000f00 */
/*00b0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*00c0*/ HFMA2.MMA R28, -RZ, RZ, 0, 0 ; /* 0x00000000ff1c7435 */
/* 0x000fe200000001ff */
/*00d0*/ IMAD R3, R3, c[0x0][0x178], RZ ; /* 0x00005e0003037a24 */
/* 0x000fe200078e02ff */
/*00e0*/ ISETP.GE.AND P0, PT, R2, 0x1, PT ; /* 0x000000010200780c */
/* 0x000fda0003f06270 */
/*00f0*/ @!P0 BRA 0xbf0 ; /* 0x00000af000008947 */
/* 0x000fea0003800000 */
/*0100*/ IADD3 R4, R2.reuse, -0x1, RZ ; /* 0xffffffff02047810 */
/* 0x040fe40007ffe0ff */
/*0110*/ LOP3.LUT R5, R2, 0x3, RZ, 0xc0, !PT ; /* 0x0000000302057812 */
/* 0x000fe400078ec0ff */
/*0120*/ ISETP.GE.U32.AND P0, PT, R4, 0x3, PT ; /* 0x000000030400780c */
/* 0x000fe40003f06070 */
/*0130*/ MOV R4, RZ ; /* 0x000000ff00047202 */
/* 0x000fe40000000f00 */
/*0140*/ MOV R28, RZ ; /* 0x000000ff001c7202 */
/* 0x000fd20000000f00 */
/*0150*/ @!P0 BRA 0xaf0 ; /* 0x0000099000008947 */
/* 0x000fea0003800000 */
/*0160*/ IADD3 R6, -R5, c[0x0][0x178], RZ ; /* 0x00005e0005067a10 */
/* 0x000fe20007ffe1ff */
/*0170*/ HFMA2.MMA R25, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff197435 */
/* 0x000fe200000001ff */
/*0180*/ ULDC.64 UR6, c[0x0][0x160] ; /* 0x0000580000067ab9 */
/* 0x000fe20000000a00 */
/*0190*/ MOV R4, RZ ; /* 0x000000ff00047202 */
/* 0x000fe40000000f00 */
/*01a0*/ ISETP.GT.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fcc0003f04270 */
/*01b0*/ IMAD.WIDE R24, R0, R25, c[0x0][0x168] ; /* 0x00005a0000187625 */
/* 0x000fce00078e0219 */
/*01c0*/ @!P0 BRA 0x960 ; /* 0x0000079000008947 */
/* 0x000fea0003800000 */
/*01d0*/ ISETP.GT.AND P1, PT, R6, 0xc, PT ; /* 0x0000000c0600780c */
/* 0x000fe40003f24270 */
/*01e0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fd60003f0f070 */
/*01f0*/ @!P1 BRA 0x6a0 ; /* 0x000004a000009947 */
/* 0x000fea0003800000 */
/*0200*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0210*/ MOV R12, UR6 ; /* 0x00000006000c7c02 */
/* 0x000fe20008000f00 */
/*0220*/ LDG.E R29, [R24.64] ; /* 0x00000004181d7981 */
/* 0x0000a2000c1e1900 */
/*0230*/ MOV R13, UR7 ; /* 0x00000007000d7c02 */
/* 0x000fca0008000f00 */
/*0240*/ IMAD.WIDE R12, R3, 0x4, R12 ; /* 0x00000004030c7825 */
/* 0x000fca00078e020c */
/*0250*/ LDG.E R27, [R12.64] ; /* 0x000000040c1b7981 */
/* 0x000ea2000c1e1900 */
/*0260*/ IMAD.WIDE R10, R2, 0x4, R24 ; /* 0x00000004020a7825 */
/* 0x000fc600078e0218 */
/*0270*/ LDG.E R17, [R12.64+0x4] ; /* 0x000004040c117981 */
/* 0x000ee6000c1e1900 */
/*0280*/ IMAD.WIDE R18, R2.reuse, 0x4, R10 ; /* 0x0000000402127825 */
/* 0x040fe200078e020a */
/*0290*/ LDG.E R16, [R10.64] ; /* 0x000000040a107981 */
/* 0x0002e8000c1e1900 */
/*02a0*/ LDG.E R7, [R12.64+0xc] ; /* 0x00000c040c077981 */
/* 0x000f22000c1e1900 */
/*02b0*/ IMAD.WIDE R14, R2, 0x4, R18 ; /* 0x00000004020e7825 */
/* 0x000fc600078e0212 */
/*02c0*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */
/* 0x000b26000c1e1900 */
/*02d0*/ IMAD.WIDE R20, R2.reuse, 0x4, R14 ; /* 0x0000000402147825 */
/* 0x040fe200078e020e */
/*02e0*/ LDG.E R26, [R14.64] ; /* 0x000000040e1a7981 */
/* 0x000128000c1e1900 */
/*02f0*/ LDG.E R9, [R12.64+0x10] ; /* 0x000010040c097981 */
/* 0x000f28000c1e1900 */
/*0300*/ LDG.E R19, [R12.64+0x8] ; /* 0x000008040c137981 */
/* 0x020f22000c1e1900 */
/*0310*/ IMAD.WIDE R14, R2, 0x4, R20 ; /* 0x00000004020e7825 */
/* 0x001fc600078e0214 */
/*0320*/ LDG.E R20, [R20.64] ; /* 0x0000000414147981 */
/* 0x000166000c1e1900 */
/*0330*/ IMAD.WIDE R22, R2.reuse, 0x4, R14 ; /* 0x0000000402167825 */
/* 0x040fe200078e020e */
/*0340*/ LDG.E R8, [R14.64] ; /* 0x000000040e087981 */
/* 0x000168000c1e1900 */
/*0350*/ LDG.E R11, [R12.64+0x14] ; /* 0x000014040c0b7981 */
/* 0x002f62000c1e1900 */
/*0360*/ IMAD.WIDE R24, R2, 0x4, R22 ; /* 0x0000000402187825 */
/* 0x000fc600078e0216 */
/*0370*/ LDG.E R10, [R22.64] ; /* 0x00000004160a7981 */
/* 0x000368000c1e1900 */
/*0380*/ LDG.E R21, [R12.64+0x18] ; /* 0x000018040c157981 */
/* 0x001f62000c1e1900 */
/*0390*/ IMAD R29, R29, R27, R28 ; /* 0x0000001b1d1d7224 */
/* 0x004fc600078e021c */
/*03a0*/ LDG.E R27, [R12.64+0x1c] ; /* 0x00001c040c1b7981 */
/* 0x000ea8000c1e1900 */
/*03b0*/ LDG.E R28, [R24.64] ; /* 0x00000004181c7981 */
/* 0x0000a2000c1e1900 */
/*03c0*/ IMAD.WIDE R14, R2, 0x4, R24 ; /* 0x00000004020e7825 */
/* 0x000fc800078e0218 */
/*03d0*/ IMAD R29, R16, R17, R29 ; /* 0x00000011101d7224 */
/* 0x008fe400078e021d */
/*03e0*/ IMAD.WIDE R16, R2, 0x4, R14 ; /* 0x0000000402107825 */
/* 0x000fe400078e020e */
/*03f0*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x0006a4000c1e1900 */
/*0400*/ IMAD R29, R18, R19, R29 ; /* 0x00000013121d7224 */
/* 0x010fe400078e021d */
/*0410*/ IMAD.WIDE R18, R2, 0x4, R16 ; /* 0x0000000402127825 */
/* 0x000fe400078e0210 */
/*0420*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x0008a4000c1e1900 */
/*0430*/ IMAD R26, R26, R7, R29 ; /* 0x000000071a1a7224 */
/* 0x000fc400078e021d */
/*0440*/ IMAD.WIDE R22, R2.reuse, 0x4, R18 ; /* 0x0000000402167825 */
/* 0x042fe200078e0212 */
/*0450*/ LDG.E R7, [R12.64+0x20] ; /* 0x000020040c077981 */
/* 0x000ea8000c1e1900 */
/*0460*/ LDG.E R29, [R12.64+0x24] ; /* 0x000024040c1d7981 */
/* 0x000ea2000c1e1900 */
/*0470*/ IMAD.WIDE R24, R2, 0x4, R22 ; /* 0x0000000402187825 */
/* 0x001fc600078e0216 */
/*0480*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */
/* 0x0000a2000c1e1900 */
/*0490*/ IMAD R9, R20, R9, R26 ; /* 0x0000000914097224 */
/* 0x020fc600078e021a */
/*04a0*/ LDG.E R26, [R12.64+0x28] ; /* 0x000028040c1a7981 */
/* 0x000f62000c1e1900 */
/*04b0*/ IMAD R11, R8, R11, R9 ; /* 0x0000000b080b7224 */
/* 0x000fe400078e0209 */
/*04c0*/ IMAD.WIDE R8, R2, 0x4, R24 ; /* 0x0000000402087825 */
/* 0x000fe200078e0218 */
/*04d0*/ LDG.E R22, [R22.64] ; /* 0x0000000416167981 */
/* 0x000368000c1e1900 */
/*04e0*/ LDG.E R17, [R12.64+0x2c] ; /* 0x00002c040c117981 */
/* 0x010f22000c1e1900 */
/*04f0*/ IMAD R21, R10, R21, R11 ; /* 0x000000150a157224 */
/* 0x000fc600078e020b */
/*0500*/ LDG.E R15, [R24.64] ; /* 0x00000004180f7981 */
/* 0x008722000c1e1900 */
/*0510*/ IMAD.WIDE R10, R2, 0x4, R8 ; /* 0x00000004020a7825 */
/* 0x000fc600078e0208 */
/*0520*/ LDG.E R19, [R8.64] ; /* 0x0000000408137981 */
/* 0x001128000c1e1900 */
/*0530*/ LDG.E R23, [R10.64] ; /* 0x000000040a177981 */
/* 0x002f28000c1e1900 */
/*0540*/ LDG.E R24, [R12.64+0x30] ; /* 0x000030040c187981 */
/* 0x008ee8000c1e1900 */
/*0550*/ LDG.E R25, [R12.64+0x38] ; /* 0x000038040c197981 */
/* 0x000ee8000c1e1900 */
/*0560*/ LDG.E R8, [R12.64+0x3c] ; /* 0x00003c040c087981 */
/* 0x001ee2000c1e1900 */
/*0570*/ IMAD R9, R28, R27, R21 ; /* 0x0000001b1c097224 */
/* 0x004fc600078e0215 */
/*0580*/ LDG.E R28, [R12.64+0x34] ; /* 0x000034040c1c7981 */
/* 0x000ea2000c1e1900 */
/*0590*/ IMAD.WIDE R20, R2, 0x4, R10 ; /* 0x0000000402147825 */
/* 0x000fca00078e020a */
/*05a0*/ LDG.E R27, [R20.64] ; /* 0x00000004141b7981 */
/* 0x000ea2000c1e1900 */
/*05b0*/ IADD3 R6, R6, -0x10, RZ ; /* 0xfffffff006067810 */
/* 0x000fc80007ffe0ff */
/*05c0*/ ISETP.GT.AND P1, PT, R6, 0xc, PT ; /* 0x0000000c0600780c */
/* 0x000fe20003f24270 */
/*05d0*/ IMAD R7, R14, R7, R9 ; /* 0x000000070e077224 */
/* 0x000fc800078e0209 */
/*05e0*/ IMAD R7, R16, R29, R7 ; /* 0x0000001d10077224 */
/* 0x000fc800078e0207 */
/*05f0*/ IMAD R7, R18, R26, R7 ; /* 0x0000001a12077224 */
/* 0x020fc800078e0207 */
/*0600*/ IMAD R7, R22, R17, R7 ; /* 0x0000001116077224 */
/* 0x010fe200078e0207 */
/*0610*/ UIADD3 UR6, UP0, UR6, 0x40, URZ ; /* 0x0000004006067890 */
/* 0x000fe2000ff1e03f */
/*0620*/ IADD3 R4, R4, 0x10, RZ ; /* 0x0000001004047810 */
/* 0x000fc60007ffe0ff */
/*0630*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*0640*/ IMAD R7, R15, R24, R7 ; /* 0x000000180f077224 */
/* 0x008fc800078e0207 */
/*0650*/ IMAD R28, R19, R28, R7 ; /* 0x0000001c131c7224 */
/* 0x004fc800078e0207 */
/*0660*/ IMAD R28, R23, R25, R28 ; /* 0x00000019171c7224 */
/* 0x000fe400078e021c */
/*0670*/ IMAD.WIDE R24, R2, 0x4, R20 ; /* 0x0000000402187825 */
/* 0x000fc800078e0214 */
/*0680*/ IMAD R28, R27, R8, R28 ; /* 0x000000081b1c7224 */
/* 0x000fe200078e021c */
/*0690*/ @P1 BRA 0x210 ; /* 0xfffffb7000001947 */
/* 0x000fea000383ffff */
/*06a0*/ ISETP.GT.AND P1, PT, R6, 0x4, PT ; /* 0x000000040600780c */
/* 0x000fda0003f24270 */
/*06b0*/ @!P1 BRA 0x940 ; /* 0x0000028000009947 */
/* 0x000fea0003800000 */
/*06c0*/ IMAD.WIDE R16, R2, 0x4, R24 ; /* 0x0000000402107825 */
/* 0x000fe200078e0218 */
/*06d0*/ MOV R8, UR6 ; /* 0x0000000600087c02 */
/* 0x000fe20008000f00 */
/*06e0*/ LDG.E R7, [R24.64] ; /* 0x0000000418077981 */
/* 0x0000a2000c1e1900 */
/*06f0*/ MOV R9, UR7 ; /* 0x0000000700097c02 */
/* 0x000fc60008000f00 */
/*0700*/ IMAD.WIDE R12, R2.reuse, 0x4, R16 ; /* 0x00000004020c7825 */
/* 0x040fe200078e0210 */
/*0710*/ LDG.E R21, [R16.64] ; /* 0x0000000410157981 */
/* 0x0002e6000c1e1900 */
/*0720*/ IMAD.WIDE R8, R3, 0x4, R8 ; /* 0x0000000403087825 */
/* 0x000fe200078e0208 */
/*0730*/ LDG.E R23, [R12.64] ; /* 0x000000040c177981 */
/* 0x000966000c1e1900 */
/*0740*/ IMAD.WIDE R14, R2.reuse, 0x4, R12 ; /* 0x00000004020e7825 */
/* 0x040fe200078e020c */
/*0750*/ LDG.E R20, [R8.64] ; /* 0x0000000408147981 */
/* 0x000ea8000c1e1900 */
/*0760*/ LDG.E R22, [R8.64+0x4] ; /* 0x0000040408167981 */
/* 0x000ee2000c1e1900 */
/*0770*/ IMAD.WIDE R10, R2, 0x4, R14 ; /* 0x00000004020a7825 */
/* 0x000fc600078e020e */
/*0780*/ LDG.E R26, [R8.64+0x8] ; /* 0x00000804081a7981 */
/* 0x000f66000c1e1900 */
/*0790*/ IMAD.WIDE R16, R2.reuse, 0x4, R10 ; /* 0x0000000402107825 */
/* 0x042fe200078e020a */
/*07a0*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000368000c1e1900 */
/*07b0*/ LDG.E R27, [R8.64+0xc] ; /* 0x00000c04081b7981 */
/* 0x000f62000c1e1900 */
/*07c0*/ IMAD.WIDE R18, R2, 0x4, R16 ; /* 0x0000000402127825 */
/* 0x000fc600078e0210 */
/*07d0*/ LDG.E R10, [R10.64] ; /* 0x000000040a0a7981 */
/* 0x000368000c1e1900 */
/*07e0*/ LDG.E R25, [R8.64+0x10] ; /* 0x0000100408197981 */
/* 0x001f62000c1e1900 */
/*07f0*/ IMAD.WIDE R12, R2, 0x4, R18 ; /* 0x00000004020c7825 */
/* 0x010fc600078e0212 */
/*0800*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x000f28000c1e1900 */
/*0810*/ LDG.E R29, [R8.64+0x14] ; /* 0x00001404081d7981 */
/* 0x000f28000c1e1900 */
/*0820*/ LDG.E R24, [R18.64] ; /* 0x0000000412187981 */
/* 0x000128000c1e1900 */
/*0830*/ LDG.E R11, [R8.64+0x18] ; /* 0x00001804080b7981 */
/* 0x002f28000c1e1900 */
/*0840*/ LDG.E R15, [R12.64] ; /* 0x000000040c0f7981 */
/* 0x000f28000c1e1900 */
/*0850*/ LDG.E R18, [R8.64+0x1c] ; /* 0x00001c0408127981 */
/* 0x001f22000c1e1900 */
/*0860*/ UIADD3 UR6, UP0, UR6, 0x20, URZ ; /* 0x0000002006067890 */
/* 0x000fe2000ff1e03f */
/*0870*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fc40003f0e170 */
/*0880*/ IADD3 R4, R4, 0x8, RZ ; /* 0x0000000804047810 */
/* 0x000fe40007ffe0ff */
/*0890*/ IADD3 R6, R6, -0x8, RZ ; /* 0xfffffff806067810 */
/* 0x000fe20007ffe0ff */
/*08a0*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*08b0*/ IMAD R7, R7, R20, R28 ; /* 0x0000001407077224 */
/* 0x004fc800078e021c */
/*08c0*/ IMAD R7, R21, R22, R7 ; /* 0x0000001615077224 */
/* 0x008fc800078e0207 */
/*08d0*/ IMAD R7, R23, R26, R7 ; /* 0x0000001a17077224 */
/* 0x020fc800078e0207 */
/*08e0*/ IMAD R7, R14, R27, R7 ; /* 0x0000001b0e077224 */
/* 0x000fc800078e0207 */
/*08f0*/ IMAD R7, R10, R25, R7 ; /* 0x000000190a077224 */
/* 0x000fc800078e0207 */
/*0900*/ IMAD R7, R16, R29, R7 ; /* 0x0000001d10077224 */
/* 0x010fc800078e0207 */
/*0910*/ IMAD R7, R24, R11, R7 ; /* 0x0000000b18077224 */
/* 0x000fe400078e0207 */
/*0920*/ IMAD.WIDE R24, R2, 0x4, R12 ; /* 0x0000000402187825 */
/* 0x000fc800078e020c */
/*0930*/ IMAD R28, R15, R18, R7 ; /* 0x000000120f1c7224 */
/* 0x000fe400078e0207 */
/*0940*/ ISETP.NE.OR P0, PT, R6, RZ, P0 ; /* 0x000000ff0600720c */
/* 0x000fda0000705670 */
/*0950*/ @!P0 BRA 0xaf0 ; /* 0x0000019000008947 */
/* 0x000fea0003800000 */
/*0960*/ MOV R8, UR6 ; /* 0x0000000600087c02 */
/* 0x000fe20008000f00 */
/*0970*/ IMAD.WIDE R14, R2, 0x4, R24 ; /* 0x00000004020e7825 */
/* 0x000fe200078e0218 */
/*0980*/ MOV R9, UR7 ; /* 0x0000000700097c02 */
/* 0x000fe20008000f00 */
/*0990*/ LDG.E R25, [R24.64] ; /* 0x0000000418197981 */
/* 0x000ea8000c1e1900 */
/*09a0*/ IMAD.WIDE R8, R3, 0x4, R8 ; /* 0x0000000403087825 */
/* 0x000fc800078e0208 */
/*09b0*/ IMAD.WIDE R12, R2.reuse, 0x4, R14 ; /* 0x00000004020c7825 */
/* 0x040fe200078e020e */
/*09c0*/ LDG.E R7, [R8.64] ; /* 0x0000000408077981 */
/* 0x000ea8000c1e1900 */
/*09d0*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000ee2000c1e1900 */
/*09e0*/ IMAD.WIDE R10, R2, 0x4, R12 ; /* 0x00000004020a7825 */
/* 0x000fc600078e020c */
/*09f0*/ LDG.E R16, [R8.64+0x4] ; /* 0x0000040408107981 */
/* 0x000ee8000c1e1900 */
/*0a00*/ LDG.E R18, [R12.64] ; /* 0x000000040c127981 */
/* 0x000f28000c1e1900 */
/*0a10*/ LDG.E R17, [R8.64+0x8] ; /* 0x0000080408117981 */
/* 0x000f28000c1e1900 */
/*0a20*/ LDG.E R19, [R8.64+0xc] ; /* 0x00000c0408137981 */
/* 0x000f68000c1e1900 */
/*0a30*/ LDG.E R20, [R10.64] ; /* 0x000000040a147981 */
/* 0x000f62000c1e1900 */
/*0a40*/ IADD3 R6, R6, -0x4, RZ ; /* 0xfffffffc06067810 */
/* 0x000fc80007ffe0ff */
/*0a50*/ ISETP.NE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fe20003f05270 */
/*0a60*/ UIADD3 UR6, UP0, UR6, 0x10, URZ ; /* 0x0000001006067890 */
/* 0x000fe2000ff1e03f */
/*0a70*/ IADD3 R4, R4, 0x4, RZ ; /* 0x0000000404047810 */
/* 0x000fc60007ffe0ff */
/*0a80*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*0a90*/ IMAD R7, R25, R7, R28 ; /* 0x0000000719077224 */
/* 0x004fc800078e021c */
/*0aa0*/ IMAD R7, R14, R16, R7 ; /* 0x000000100e077224 */
/* 0x008fe400078e0207 */
/*0ab0*/ IMAD.WIDE R24, R2, 0x4, R10 ; /* 0x0000000402187825 */
/* 0x000fc800078e020a */
/*0ac0*/ IMAD R7, R18, R17, R7 ; /* 0x0000001112077224 */
/* 0x010fc800078e0207 */
/*0ad0*/ IMAD R28, R20, R19, R7 ; /* 0x00000013141c7224 */
/* 0x020fe200078e0207 */
/*0ae0*/ @P0 BRA 0x960 ; /* 0xfffffe7000000947 */
/* 0x000fea000383ffff */
/*0af0*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fda0003f05270 */
/*0b00*/ @!P0 BRA 0xbf0 ; /* 0x000000e000008947 */
/* 0x000fea0003800000 */
/*0b10*/ HFMA2.MMA R9, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff097435 */
/* 0x000fe200000001ff */
/*0b20*/ IADD3 R6, R3, R4, RZ ; /* 0x0000000403067210 */
/* 0x000fe20007ffe0ff */
/*0b30*/ IMAD R4, R4, c[0x0][0x178], R0 ; /* 0x00005e0004047a24 */
/* 0x000fd000078e0200 */
/*0b40*/ IMAD.WIDE R6, R6, R9, c[0x0][0x160] ; /* 0x0000580006067625 */
/* 0x000fc800078e0209 */
/*0b50*/ IMAD.WIDE R8, R4, R9, c[0x0][0x168] ; /* 0x00005a0004087625 */
/* 0x000fca00078e0209 */
/*0b60*/ LDG.E R11, [R8.64] ; /* 0x00000004080b7981 */
/* 0x0000a8000c1e1900 */
/*0b70*/ LDG.E R4, [R6.64] ; /* 0x0000000406047981 */
/* 0x0002a2000c1e1900 */
/*0b80*/ IADD3 R5, R5, -0x1, RZ ; /* 0xffffffff05057810 */
/* 0x000fc80007ffe0ff */
/*0b90*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fe20003f05270 */
/*0ba0*/ IMAD.WIDE R8, R2, 0x4, R8 ; /* 0x0000000402087825 */
/* 0x001fe200078e0208 */
/*0bb0*/ IADD3 R6, P1, R6, 0x4, RZ ; /* 0x0000000406067810 */
/* 0x002fc80007f3e0ff */
/*0bc0*/ IADD3.X R7, RZ, R7, RZ, P1, !PT ; /* 0x00000007ff077210 */
/* 0x000fe20000ffe4ff */
/*0bd0*/ IMAD R28, R11, R4, R28 ; /* 0x000000040b1c7224 */
/* 0x004fcc00078e021c */
/*0be0*/ @P0 BRA 0xb60 ; /* 0xffffff7000000947 */
/* 0x000fea000383ffff */
/*0bf0*/ IADD3 R3, R0, R3, RZ ; /* 0x0000000300037210 */
/* 0x000fe40007ffe0ff */
/*0c00*/ MOV R2, 0x4 ; /* 0x0000000400027802 */
/* 0x000fca0000000f00 */
/*0c10*/ IMAD.WIDE R2, R3, R2, c[0x0][0x170] ; /* 0x00005c0003027625 */
/* 0x000fca00078e0202 */
/*0c20*/ STG.E [R2.64], R28 ; /* 0x0000001c02007986 */
/* 0x000fe2000c101904 */
/*0c30*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0c40*/ BRA 0xc40; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0c50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c80*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ca0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cb0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cc0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cd0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ce0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cf0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /*
* CS-4370-90: Par. Prog. Many-Core GPUs
* Nathan Dunn
* Professor Liu
* 10/4/19
* Project 1 - Basic Matrix Multiplication
*/
#include <stdio.h>
#include <cuda.h>
// -------- EDIT THESE --------------
#define N 8 // size of the matrix
#define BLOCK 4 // size of thread block
/**
* Performs matrix multiplication on the GPU device
* dev_a - first matrix to be multiplied
* dev_b - second matrix to be multiplied
* dev_c - result of a * b is stored in this matrix
* size - size of the matrix (size * size)
*/
__global__ void MatrixMulKernel(int *dev_a, int *dev_b, int *dev_c, int size){
	// Global output coordinates owned by this thread (2-D grid of 2-D blocks).
	int r = blockIdx.y * blockDim.y + threadIdx.y;
	int c = blockIdx.x * blockDim.x + threadIdx.x;
	if (r < size && c < size) {
		// C[r][c] = dot product of row r of A with column c of B.
		int dot = 0;
		for (int k = 0; k < size; k++) {
			dot += dev_a[r * size + k] * dev_b[k * size + c];
		}
		dev_c[r * size + c] = dot;
	}
}
/**
* Performs matrix multiplication on the CPU
* a - first matrix to be multiplied
* b - second matrix to be multiplied
* c - result of a * b is stored in this matrix
* size - size of the matrix (size * size)
*/
// CPU reference for C = A * B (size x size, row-major int matrices),
// used to check the GPU kernel's output.
void MatrixMulOnHost(int *a, int *b, int *c, int size){
	// Walk C in flat row-major order; each cell is one dot product.
	for (int idx = 0; idx < size * size; ++idx) {
		int i = idx / size;  // row of C
		int j = idx % size;  // column of C
		int acc = 0;
		for (int k = 0; k < size; ++k)
			acc += a[i * size + k] * b[k * size + j];
		c[idx] = acc;
	}
}
/**
Prints a matrix.
matrix - matrix to be printed
size - size of the matrix
*/
// Prints a size x size row-major matrix: one row per line, cells separated
// by a space, then one extra blank line.
void printMatrix(int * matrix, int size){
	int cell = 0;  // flat cursor; rows are contiguous, so this matches i*size+j
	for (int row = 0; row < size; ++row) {
		for (int col = 0; col < size; ++col)
			printf("%d ", matrix[cell++]);
		printf("\n");
	}
	printf("\n");
}
/**
Verifies that two matrices are equal.
a - first matrix to be compared
b - second matrix to be compared
size - size of the matrix
*/
// Element-wise comparison of two size x size matrices; prints
// TEST PASSED!!! on equality, TEST FAILED!!! on the first mismatch.
void verifyMult(int *a, int *b, int size){
	int total = size * size;  // both matrices are contiguous row-major
	for (int idx = 0; idx < total; ++idx) {
		if (a[idx] != b[idx]) {
			printf("TEST FAILED!!!\n");
			return;
		}
	}
	printf("TEST PASSED!!!\n");
}
/**
 * Prints a CUDA runtime error on stderr.
 * Returns true on cudaSuccess, false otherwise.
 */
static bool cudaCallOk(cudaError_t rc, const char *step){
	if (rc != cudaSuccess) {
		fprintf(stderr, "CUDA error during %s: %s\n", step, cudaGetErrorString(rc));
		return false;
	}
	return true;
}

int main(void){
	// define block size and count (integer ceil-divide instead of ceil(double))
	int blockSize = BLOCK;
	int blockCount = (N + blockSize - 1) / blockSize;
	dim3 dimBlock(blockSize, blockSize, 1);
	dim3 dimGrid(blockCount, blockCount, 1);
	int *a, *b, *c, *d;
	int *dev_a, *dev_b, *dev_c;
	// allocate host memory for A, B, C (CPU result), D (GPU result)
	a = (int*)malloc(sizeof(int)*N*N);
	b = (int*)malloc(sizeof(int)*N*N);
	c = (int*)malloc(sizeof(int)*N*N);
	d = (int*)malloc(sizeof(int)*N*N);
	if (a == NULL || b == NULL || c == NULL || d == NULL) {
		fprintf(stderr, "host allocation failed\n");
		return 1;
	}
	// initialize arrays a and b with a deterministic pseudo-random pattern
	int init = 1325;
	for(int i = 0; i < N; i++){
		for(int j = 0; j < N; j++){
			int index = i * N + j;
			init = 3125*init%65536;
			a[index] = (init-32768)/6553;
			b[index] = init%1000;
		}
	}
	// perform CPU matrix multiplication for gpu multiplication verification
	MatrixMulOnHost(a, b, c, N);
	printf("Matrix A:\n");
	printMatrix(a, N);
	printf("\nMatrix B:\n");
	printMatrix(b, N);
	printf("\nCPU Multiplication of A * B:\n");
	printMatrix(c, N);
	printf("Thread Block Count: %d\n", blockCount);
	printf("Starting GPU Computations\n\n");
	// allocate device memory, checking every call (originals were unchecked)
	if (!cudaCallOk(cudaMalloc((void **)(&dev_a), N*N*sizeof(int)), "cudaMalloc dev_a")) return 1;
	if (!cudaCallOk(cudaMalloc((void **)(&dev_b), N*N*sizeof(int)), "cudaMalloc dev_b")) return 1;
	if (!cudaCallOk(cudaMalloc((void **)(&dev_c), N*N*sizeof(int)), "cudaMalloc dev_c")) return 1;
	// copy array a,b (system memory) to dev_a, dev_b (device memory)
	if (!cudaCallOk(cudaMemcpy(dev_a,a,N*N*sizeof(int), cudaMemcpyHostToDevice), "copy a to dev_a")) return 1;
	if (!cudaCallOk(cudaMemcpy(dev_b,b,N*N*sizeof(int), cudaMemcpyHostToDevice), "copy b to dev_b")) return 1;
	// launch kernel; cudaGetLastError() catches bad launch configs,
	// cudaDeviceSynchronize() surfaces asynchronous execution faults
	MatrixMulKernel<<<dimGrid, dimBlock>>>(dev_a, dev_b, dev_c, N);
	if (!cudaCallOk(cudaGetLastError(), "kernel launch")) return 1;
	if (!cudaCallOk(cudaDeviceSynchronize(), "kernel execution")) return 1;
	// copy results back; cudaMemcpy blocks, so no extra synchronize is needed
	if (!cudaCallOk(cudaMemcpy(d, dev_c, N*N*sizeof(int), cudaMemcpyDeviceToHost), "copy dev_c to d")) return 1;
	printf("GPU Multiplication of A * B:\n");
	printMatrix(d, N);
	// verify that CPU and GPU multiplication match
	verifyMult(c, d, N);
	// free system and device memory
	free(a);
	free(b);
	free(c);
	free(d);
	cudaFree(dev_a);
	cudaFree(dev_b);
	cudaFree(dev_c);
	return 0;
} | .file "tmpxft_0004f016_00000000-6_dunn_project1_mult.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2063:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2063:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z15MatrixMulOnHostPiS_S_i
.type _Z15MatrixMulOnHostPiS_S_i, @function
_Z15MatrixMulOnHostPiS_S_i:
.LFB2057:
.cfi_startproc
endbr64
testl %ecx, %ecx
jle .L11
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
movq %rdi, %r8
movq %rsi, %rbp
movq %rdx, %r11
movl %ecx, %r13d
movslq %ecx, %r12
leaq 0(,%r12,4), %rdi
movq %r8, %rbx
addq %rdi, %r8
movl $0, %r14d
.L5:
movq %rbp, %r10
movl $0, %r9d
.L8:
movq %r10, %rcx
movq %rbx, %rax
movl $0, %esi
.L6:
movl (%rax), %edx
imull (%rcx), %edx
addl %edx, %esi
addq $4, %rax
addq %rdi, %rcx
cmpq %r8, %rax
jne .L6
movl %esi, (%r11,%r9,4)
addq $1, %r9
addq $4, %r10
cmpq %r12, %r9
jne .L8
addl $1, %r14d
addq %rdi, %r11
addq %rdi, %rbx
addq %rdi, %r8
cmpl %r14d, %r13d
jne .L5
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
.cfi_restore 13
.cfi_restore 14
ret
.cfi_endproc
.LFE2057:
.size _Z15MatrixMulOnHostPiS_S_i, .-_Z15MatrixMulOnHostPiS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%d "
.LC1:
.string "\n"
.text
.globl _Z11printMatrixPii
.type _Z11printMatrixPii, @function
_Z11printMatrixPii:
.LFB2058:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $24, %rsp
.cfi_def_cfa_offset 80
movl %esi, 12(%rsp)
testl %esi, %esi
jle .L15
movslq %esi, %r14
leaq 0(,%r14,4), %r15
leaq (%rdi,%r15), %rbp
negq %r14
salq $2, %r14
movl $0, %r13d
leaq .LC0(%rip), %r12
.L16:
leaq 0(%rbp,%r14), %rbx
.L17:
movl (%rbx), %edx
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %rbp, %rbx
jne .L17
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %r13d
addq %r15, %rbp
cmpl %r13d, 12(%rsp)
jne .L16
.L15:
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2058:
.size _Z11printMatrixPii, .-_Z11printMatrixPii
.section .rodata.str1.1
.LC2:
.string "TEST FAILED!!!\n"
.LC3:
.string "TEST PASSED!!!\n"
.text
.globl _Z10verifyMultPiS_i
.type _Z10verifyMultPiS_i, @function
_Z10verifyMultPiS_i:
.LFB2059:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
testl %edx, %edx
jle .L21
movslq %edx, %rax
leaq 0(,%rax,4), %r10
negq %rax
leaq 0(,%rax,4), %r8
movq %r10, %rcx
movl $0, %r9d
.L22:
leaq (%rcx,%r8), %rax
.L25:
movl (%rsi,%rax), %r11d
cmpl %r11d, (%rdi,%rax)
jne .L28
addq $4, %rax
cmpq %rcx, %rax
jne .L25
addl $1, %r9d
addq %r10, %rcx
cmpl %r9d, %edx
jne .L22
.L21:
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L20
.L28:
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L20:
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _Z10verifyMultPiS_i, .-_Z10verifyMultPiS_i
.globl _Z40__device_stub__Z15MatrixMulKernelPiS_S_iPiS_S_i
.type _Z40__device_stub__Z15MatrixMulKernelPiS_S_iPiS_S_i, @function
_Z40__device_stub__Z15MatrixMulKernelPiS_S_iPiS_S_i:
.LFB2085:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L33
.L29:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L34
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L33:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z15MatrixMulKernelPiS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L29
.L34:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z40__device_stub__Z15MatrixMulKernelPiS_S_iPiS_S_i, .-_Z40__device_stub__Z15MatrixMulKernelPiS_S_iPiS_S_i
.globl _Z15MatrixMulKernelPiS_S_i
.type _Z15MatrixMulKernelPiS_S_i, @function
_Z15MatrixMulKernelPiS_S_i:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z40__device_stub__Z15MatrixMulKernelPiS_S_iPiS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z15MatrixMulKernelPiS_S_i, .-_Z15MatrixMulKernelPiS_S_i
.section .rodata.str1.1
.LC4:
.string "Matrix A:\n"
.LC5:
.string "\nMatrix B:\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC6:
.string "\nCPU Multiplication of A * B:\n"
.section .rodata.str1.1
.LC7:
.string "Thread Block Count: %d\n"
.LC8:
.string "Starting GPU Computations\n\n"
.LC9:
.string "GPU Multiplication of A * B:\n"
.text
.globl main
.type main, @function
main:
.LFB2060:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $72, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl $4, 32(%rsp)
movl $4, 36(%rsp)
movl $1, 40(%rsp)
movl $2, 44(%rsp)
movl $2, 48(%rsp)
movl $1, 52(%rsp)
movl $256, %edi
call malloc@PLT
movq %rax, %rbp
movl $256, %edi
call malloc@PLT
movq %rax, %rbx
movl $256, %edi
call malloc@PLT
movq %rax, %r13
movl $256, %edi
call malloc@PLT
movq %rax, %r12
movl $32, %edi
movl $1325, %edx
.L38:
leaq -32(%rdi), %rsi
.L39:
imull $3125, %edx, %edx
movl %edx, %eax
sarl $31, %eax
shrl $16, %eax
addl %eax, %edx
movzwl %dx, %edx
movl %edx, %ecx
subl %eax, %ecx
movl %ecx, %edx
leal -32768(%rcx), %r8d
movslq %r8d, %rax
imulq $-1610366953, %rax, %rax
shrq $32, %rax
addl %r8d, %eax
sarl $12, %eax
sarl $31, %r8d
subl %r8d, %eax
movl %eax, 0(%rbp,%rsi)
movslq %ecx, %rax
imulq $274877907, %rax, %rax
sarq $38, %rax
movl %ecx, %r8d
sarl $31, %r8d
subl %r8d, %eax
imull $1000, %eax, %eax
subl %eax, %ecx
movl %ecx, (%rbx,%rsi)
addq $4, %rsi
cmpq %rdi, %rsi
jne .L39
addq $32, %rdi
cmpq $288, %rdi
jne .L38
movl $8, %ecx
movq %r13, %rdx
movq %rbx, %rsi
movq %rbp, %rdi
call _Z15MatrixMulOnHostPiS_S_i
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $8, %esi
movq %rbp, %rdi
call _Z11printMatrixPii
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $8, %esi
movq %rbx, %rdi
call _Z11printMatrixPii
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $8, %esi
movq %r13, %rdi
call _Z11printMatrixPii
movl $2, %edx
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 8(%rsp), %rdi
movl $256, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $256, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $256, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $256, %edx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $256, %edx
movq %rbx, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl 40(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 32(%rsp), %rdx
movq 44(%rsp), %rdi
movl 52(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L45
.L41:
call cudaDeviceSynchronize@PLT
movl $2, %ecx
movl $256, %edx
movq 24(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
call cudaDeviceSynchronize@PLT
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $8, %esi
movq %r12, %rdi
call _Z11printMatrixPii
movl $8, %edx
movq %r12, %rsi
movq %r13, %rdi
call _Z10verifyMultPiS_i
movq %rbp, %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L46
movl $0, %eax
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L45:
.cfi_restore_state
movl $8, %ecx
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z40__device_stub__Z15MatrixMulKernelPiS_S_iPiS_S_i
jmp .L41
.L46:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2060:
.size main, .-main
.section .rodata.str1.1
.LC10:
.string "_Z15MatrixMulKernelPiS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2088:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _Z15MatrixMulKernelPiS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /*
* CS-4370-90: Par. Prog. Many-Core GPUs
* Nathan Dunn
* Professor Liu
* 10/4/19
* Project 1 - Basic Matrix Multiplication
*/
#include <stdio.h>
#include <cuda.h>
// -------- EDIT THESE --------------
#define N 8 // size of the matrix
#define BLOCK 4 // size of thread block
/**
* Performs matrix multiplication on the GPU device
* dev_a - first matrix to be multiplied
* dev_b - second matrix to be multiplied
* dev_c - result of a * b is stored in this matrix
* size - size of the matrix (size * size)
*/
__global__ void MatrixMulKernel(int *dev_a, int *dev_b, int *dev_c, int size){
int row = blockIdx.y*blockDim.y+threadIdx.y;
int column = blockIdx.x*blockDim.x+threadIdx.x;
if(row < size && column < size){
int sum = 0;
for(int k = 0; k < size; k++){
sum += dev_a[row * size + k] * dev_b[k * size + column];
}
dev_c[row * size + column] = sum;
}
}
/**
* Performs matrix multiplication on the CPU
* a - first matrix to be multiplied
* b - second matrix to be multiplied
* c - result of a * b is stored in this matrix
* size - size of the matrix (size * size)
*/
void MatrixMulOnHost(int *a, int *b, int *c, int size){
for(int i = 0; i < size; i++){
for(int j = 0; j < size; j++){
int sum = 0;
for(int k = 0; k < size; k++){
int d = a[i * size + k];
int e = b[k * size + j];
sum += d * e;
}
c[i * size + j] = sum;
}
}
}
/**
Prints a matrix.
matrix - matrix to be printed
size - size of the matrix
*/
void printMatrix(int * matrix, int size){
for(int i = 0; i < size; i++){
for(int j = 0; j < size; j++){
printf("%d ", matrix[i * size + j]);
}
printf("\n");
}
printf("\n");
}
/**
Verifies that two matrices are equal.
a - first matrix to be compared
b - second matrix to be compared
size - size of the matrix
*/
void verifyMult(int *a, int *b, int size){
for(int i = 0; i < size; i++){
for(int j = 0; j < size; j++){
int index = i * size + j;
if(a[index] != b[index]){
goto FAILED;
}
}
}
printf("TEST PASSED!!!\n");
return;
FAILED: printf("TEST FAILED!!!\n");
}
int main(void){
// define block size and count
int blockSize = BLOCK;
int blockCount = ceil(N/double(blockSize));
dim3 dimBlock(blockSize, blockSize, 1);
dim3 dimGrid(blockCount, blockCount, 1);
int *a, *b, *c, *d;
int *dev_a, *dev_b, *dev_c;
// allocate memory for matrix A, B, C, D
a = (int*)malloc(sizeof(int)*N*N);
b = (int*)malloc(sizeof(int)*N*N);
c = (int*)malloc(sizeof(int)*N*N);
d = (int*)malloc(sizeof(int)*N*N);
// initialize arrays a and b
int init = 1325;
for(int i = 0; i < N; i++){
for(int j = 0; j < N; j++){
int index = i * N + j;
init = 3125*init%65536;
a[index] = (init-32768)/6553;
b[index] = init%1000;
}
}
// perform CPU matrix multiplication for gpu multiplication verification
MatrixMulOnHost(a, b, c, N);
printf("Matrix A:\n");
printMatrix(a, N);
printf("\nMatrix B:\n");
printMatrix(b, N);
printf("\nCPU Multiplication of A * B:\n");
printMatrix(c, N);
printf("Thread Block Count: %d\n", blockCount);
printf("Starting GPU Computations\n\n");
// allocate device memory
cudaMalloc((void **)(&dev_a), N*N*sizeof(int));
cudaMalloc((void **)(&dev_b), N*N*sizeof(int));
cudaMalloc((void **)(&dev_c), N*N*sizeof(int));
// copy array a,b (system memory) to dev_a, dev_b (device memory)
cudaMemcpy(dev_a,a,N*N*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b,b,N*N*sizeof(int), cudaMemcpyHostToDevice);
// launch kernels
MatrixMulKernel<<<dimGrid, dimBlock>>>(dev_a, dev_b, dev_c, N);
cudaDeviceSynchronize();
// copy results from GPU back to system memory
cudaMemcpy(d, dev_c, N*N*sizeof(int), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
printf("GPU Multiplication of A * B:\n");
printMatrix(d, N);
// verify that CPU and GPU multiplication match
verifyMult(c, d, N);
// free system and device memory
free(a);
free(b);
free(c);
free(d);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
} | /*
* CS-4370-90: Par. Prog. Many-Core GPUs
* Nathan Dunn
* Professor Liu
* 10/4/19
* Project 1 - Basic Matrix Multiplication
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
// -------- EDIT THESE --------------
#define N 8 // size of the matrix
#define BLOCK 4 // size of thread block
/**
* Performs matrix multiplication on the GPU device
* dev_a - first matrix to be multiplied
* dev_b - second matrix to be multiplied
* dev_c - result of a * b is stored in this matrix
* size - size of the matrix (size * size)
*/
__global__ void MatrixMulKernel(int *dev_a, int *dev_b, int *dev_c, int size){
int row = blockIdx.y*blockDim.y+threadIdx.y;
int column = blockIdx.x*blockDim.x+threadIdx.x;
if(row < size && column < size){
int sum = 0;
for(int k = 0; k < size; k++){
sum += dev_a[row * size + k] * dev_b[k * size + column];
}
dev_c[row * size + column] = sum;
}
}
/**
* Performs matrix multiplication on the CPU
* a - first matrix to be multiplied
* b - second matrix to be multiplied
* c - result of a * b is stored in this matrix
* size - size of the matrix (size * size)
*/
void MatrixMulOnHost(int *a, int *b, int *c, int size){
for(int i = 0; i < size; i++){
for(int j = 0; j < size; j++){
int sum = 0;
for(int k = 0; k < size; k++){
int d = a[i * size + k];
int e = b[k * size + j];
sum += d * e;
}
c[i * size + j] = sum;
}
}
}
/**
Prints a matrix.
matrix - matrix to be printed
size - size of the matrix
*/
void printMatrix(int * matrix, int size){
for(int i = 0; i < size; i++){
for(int j = 0; j < size; j++){
printf("%d ", matrix[i * size + j]);
}
printf("\n");
}
printf("\n");
}
/**
Verifies that two matrices are equal.
a - first matrix to be compared
b - second matrix to be compared
size - size of the matrix
*/
void verifyMult(int *a, int *b, int size){
for(int i = 0; i < size; i++){
for(int j = 0; j < size; j++){
int index = i * size + j;
if(a[index] != b[index]){
goto FAILED;
}
}
}
printf("TEST PASSED!!!\n");
return;
FAILED: printf("TEST FAILED!!!\n");
}
int main(void){
// define block size and count
int blockSize = BLOCK;
int blockCount = ceil(N/double(blockSize));
dim3 dimBlock(blockSize, blockSize, 1);
dim3 dimGrid(blockCount, blockCount, 1);
int *a, *b, *c, *d;
int *dev_a, *dev_b, *dev_c;
// allocate memory for matrix A, B, C, D
a = (int*)malloc(sizeof(int)*N*N);
b = (int*)malloc(sizeof(int)*N*N);
c = (int*)malloc(sizeof(int)*N*N);
d = (int*)malloc(sizeof(int)*N*N);
// initialize arrays a and b
int init = 1325;
for(int i = 0; i < N; i++){
for(int j = 0; j < N; j++){
int index = i * N + j;
init = 3125*init%65536;
a[index] = (init-32768)/6553;
b[index] = init%1000;
}
}
// perform CPU matrix multiplication for gpu multiplication verification
MatrixMulOnHost(a, b, c, N);
printf("Matrix A:\n");
printMatrix(a, N);
printf("\nMatrix B:\n");
printMatrix(b, N);
printf("\nCPU Multiplication of A * B:\n");
printMatrix(c, N);
printf("Thread Block Count: %d\n", blockCount);
printf("Starting GPU Computations\n\n");
// allocate device memory
hipMalloc((void **)(&dev_a), N*N*sizeof(int));
hipMalloc((void **)(&dev_b), N*N*sizeof(int));
hipMalloc((void **)(&dev_c), N*N*sizeof(int));
// copy array a,b (system memory) to dev_a, dev_b (device memory)
hipMemcpy(dev_a,a,N*N*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b,b,N*N*sizeof(int), hipMemcpyHostToDevice);
// launch kernels
MatrixMulKernel<<<dimGrid, dimBlock>>>(dev_a, dev_b, dev_c, N);
hipDeviceSynchronize();
// copy results from GPU back to system memory
hipMemcpy(d, dev_c, N*N*sizeof(int), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
printf("GPU Multiplication of A * B:\n");
printMatrix(d, N);
// verify that CPU and GPU multiplication match
verifyMult(c, d, N);
// free system and device memory
free(a);
free(b);
free(c);
free(d);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /*
* CS-4370-90: Par. Prog. Many-Core GPUs
* Nathan Dunn
* Professor Liu
* 10/4/19
* Project 1 - Basic Matrix Multiplication
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
// -------- EDIT THESE --------------
#define N 8 // size of the matrix
#define BLOCK 4 // size of thread block
/**
* Performs matrix multiplication on the GPU device
* dev_a - first matrix to be multiplied
* dev_b - second matrix to be multiplied
* dev_c - result of a * b is stored in this matrix
* size - size of the matrix (size * size)
*/
__global__ void MatrixMulKernel(int *dev_a, int *dev_b, int *dev_c, int size){
int row = blockIdx.y*blockDim.y+threadIdx.y;
int column = blockIdx.x*blockDim.x+threadIdx.x;
if(row < size && column < size){
int sum = 0;
for(int k = 0; k < size; k++){
sum += dev_a[row * size + k] * dev_b[k * size + column];
}
dev_c[row * size + column] = sum;
}
}
/**
* Performs matrix multiplication on the CPU
* a - first matrix to be multiplied
* b - second matrix to be multiplied
* c - result of a * b is stored in this matrix
* size - size of the matrix (size * size)
*/
void MatrixMulOnHost(int *a, int *b, int *c, int size){
for(int i = 0; i < size; i++){
for(int j = 0; j < size; j++){
int sum = 0;
for(int k = 0; k < size; k++){
int d = a[i * size + k];
int e = b[k * size + j];
sum += d * e;
}
c[i * size + j] = sum;
}
}
}
/**
Prints a matrix.
matrix - matrix to be printed
size - size of the matrix
*/
void printMatrix(int * matrix, int size){
for(int i = 0; i < size; i++){
for(int j = 0; j < size; j++){
printf("%d ", matrix[i * size + j]);
}
printf("\n");
}
printf("\n");
}
/**
Verifies that two matrices are equal.
a - first matrix to be compared
b - second matrix to be compared
size - size of the matrix
*/
void verifyMult(int *a, int *b, int size){
for(int i = 0; i < size; i++){
for(int j = 0; j < size; j++){
int index = i * size + j;
if(a[index] != b[index]){
goto FAILED;
}
}
}
printf("TEST PASSED!!!\n");
return;
FAILED: printf("TEST FAILED!!!\n");
}
int main(void){
// define block size and count
int blockSize = BLOCK;
int blockCount = ceil(N/double(blockSize));
dim3 dimBlock(blockSize, blockSize, 1);
dim3 dimGrid(blockCount, blockCount, 1);
int *a, *b, *c, *d;
int *dev_a, *dev_b, *dev_c;
// allocate memory for matrix A, B, C, D
a = (int*)malloc(sizeof(int)*N*N);
b = (int*)malloc(sizeof(int)*N*N);
c = (int*)malloc(sizeof(int)*N*N);
d = (int*)malloc(sizeof(int)*N*N);
// initialize arrays a and b
int init = 1325;
for(int i = 0; i < N; i++){
for(int j = 0; j < N; j++){
int index = i * N + j;
init = 3125*init%65536;
a[index] = (init-32768)/6553;
b[index] = init%1000;
}
}
// perform CPU matrix multiplication for gpu multiplication verification
MatrixMulOnHost(a, b, c, N);
printf("Matrix A:\n");
printMatrix(a, N);
printf("\nMatrix B:\n");
printMatrix(b, N);
printf("\nCPU Multiplication of A * B:\n");
printMatrix(c, N);
printf("Thread Block Count: %d\n", blockCount);
printf("Starting GPU Computations\n\n");
// allocate device memory
hipMalloc((void **)(&dev_a), N*N*sizeof(int));
hipMalloc((void **)(&dev_b), N*N*sizeof(int));
hipMalloc((void **)(&dev_c), N*N*sizeof(int));
// copy array a,b (system memory) to dev_a, dev_b (device memory)
hipMemcpy(dev_a,a,N*N*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b,b,N*N*sizeof(int), hipMemcpyHostToDevice);
// launch kernels
MatrixMulKernel<<<dimGrid, dimBlock>>>(dev_a, dev_b, dev_c, N);
hipDeviceSynchronize();
// copy results from GPU back to system memory
hipMemcpy(d, dev_c, N*N*sizeof(int), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
printf("GPU Multiplication of A * B:\n");
printMatrix(d, N);
// verify that CPU and GPU multiplication match
verifyMult(c, d, N);
// free system and device memory
free(a);
free(b);
free(c);
free(d);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z15MatrixMulKernelPiS_S_i
.globl _Z15MatrixMulKernelPiS_S_i
.p2align 8
.type _Z15MatrixMulKernelPiS_S_i,@function
_Z15MatrixMulKernelPiS_S_i:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x2c
s_load_b32 s2, s[0:1], 0x18
v_bfe_u32 v2, v0, 10, 10
v_and_b32_e32 v3, 0x3ff, v0
s_waitcnt lgkmcnt(0)
s_lshr_b32 s4, s3, 16
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[0:1], null, s15, s4, v[2:3]
v_mad_u64_u32 v[1:2], null, s14, s3, v[3:4]
s_mov_b32 s3, exec_lo
v_max_i32_e32 v2, v0, v1
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s2, v2
s_cbranch_execz .LBB0_6
s_cmp_lt_i32 s2, 1
s_cbranch_scc1 .LBB0_4
s_load_b128 s[4:7], s[0:1], 0x0
v_mul_lo_u32 v2, v0, s2
s_mov_b32 s3, s2
v_mov_b32_e32 v5, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[3:4], 2, v[2:3]
v_mov_b32_e32 v2, 0
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v3, vcc_lo, s4, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo
.p2align 6
.LBB0_3:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_ashrrev_i32_e32 v6, 31, v5
s_add_i32 s3, s3, -1
s_cmp_eq_u32 s3, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[6:7], 2, v[5:6]
v_add_co_u32 v6, vcc_lo, s6, v6
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v7, vcc_lo, s7, v7, vcc_lo
global_load_b32 v8, v[3:4], off
global_load_b32 v9, v[6:7], off
s_waitcnt vmcnt(0)
v_mad_u64_u32 v[6:7], null, v9, v8, v[2:3]
v_add_co_u32 v3, vcc_lo, v3, 4
v_add_co_ci_u32_e32 v4, vcc_lo, 0, v4, vcc_lo
s_delay_alu instid0(VALU_DEP_3)
v_dual_mov_b32 v2, v6 :: v_dual_add_nc_u32 v5, s2, v5
s_cbranch_scc0 .LBB0_3
s_branch .LBB0_5
.LBB0_4:
v_mov_b32_e32 v2, 0
.LBB0_5:
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[3:4], null, v0, s2, v[1:2]
v_ashrrev_i32_e32 v4, 31, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[3:4]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB0_6:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z15MatrixMulKernelPiS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 10
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z15MatrixMulKernelPiS_S_i, .Lfunc_end0-_Z15MatrixMulKernelPiS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z15MatrixMulKernelPiS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z15MatrixMulKernelPiS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 10
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /*
* CS-4370-90: Par. Prog. Many-Core GPUs
* Nathan Dunn
* Professor Liu
* 10/4/19
* Project 1 - Basic Matrix Multiplication
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
// -------- EDIT THESE --------------
#define N 8 // size of the matrix
#define BLOCK 4 // size of thread block
/**
* Performs matrix multiplication on the GPU device
* dev_a - first matrix to be multiplied
* dev_b - second matrix to be multiplied
* dev_c - result of a * b is stored in this matrix
* size - size of the matrix (size * size)
*/
__global__ void MatrixMulKernel(int *dev_a, int *dev_b, int *dev_c, int size){
int row = blockIdx.y*blockDim.y+threadIdx.y;
int column = blockIdx.x*blockDim.x+threadIdx.x;
if(row < size && column < size){
int sum = 0;
for(int k = 0; k < size; k++){
sum += dev_a[row * size + k] * dev_b[k * size + column];
}
dev_c[row * size + column] = sum;
}
}
/**
* Performs matrix multiplication on the CPU
* a - first matrix to be multiplied
* b - second matrix to be multiplied
* c - result of a * b is stored in this matrix
* size - size of the matrix (size * size)
*/
void MatrixMulOnHost(int *a, int *b, int *c, int size){
for(int i = 0; i < size; i++){
for(int j = 0; j < size; j++){
int sum = 0;
for(int k = 0; k < size; k++){
int d = a[i * size + k];
int e = b[k * size + j];
sum += d * e;
}
c[i * size + j] = sum;
}
}
}
/**
Prints a matrix.
matrix - matrix to be printed
size - size of the matrix
*/
void printMatrix(int * matrix, int size){
for(int i = 0; i < size; i++){
for(int j = 0; j < size; j++){
printf("%d ", matrix[i * size + j]);
}
printf("\n");
}
printf("\n");
}
/**
Verifies that two matrices are equal.
a - first matrix to be compared
b - second matrix to be compared
size - size of the matrix
*/
void verifyMult(int *a, int *b, int size){
for(int i = 0; i < size; i++){
for(int j = 0; j < size; j++){
int index = i * size + j;
if(a[index] != b[index]){
goto FAILED;
}
}
}
printf("TEST PASSED!!!\n");
return;
FAILED: printf("TEST FAILED!!!\n");
}
int main(void){
// define block size and count
int blockSize = BLOCK;
int blockCount = ceil(N/double(blockSize));
dim3 dimBlock(blockSize, blockSize, 1);
dim3 dimGrid(blockCount, blockCount, 1);
int *a, *b, *c, *d;
int *dev_a, *dev_b, *dev_c;
// allocate memory for matrix A, B, C, D
a = (int*)malloc(sizeof(int)*N*N);
b = (int*)malloc(sizeof(int)*N*N);
c = (int*)malloc(sizeof(int)*N*N);
d = (int*)malloc(sizeof(int)*N*N);
// initialize arrays a and b
int init = 1325;
for(int i = 0; i < N; i++){
for(int j = 0; j < N; j++){
int index = i * N + j;
init = 3125*init%65536;
a[index] = (init-32768)/6553;
b[index] = init%1000;
}
}
// perform CPU matrix multiplication for gpu multiplication verification
MatrixMulOnHost(a, b, c, N);
printf("Matrix A:\n");
printMatrix(a, N);
printf("\nMatrix B:\n");
printMatrix(b, N);
printf("\nCPU Multiplication of A * B:\n");
printMatrix(c, N);
printf("Thread Block Count: %d\n", blockCount);
printf("Starting GPU Computations\n\n");
// allocate device memory
hipMalloc((void **)(&dev_a), N*N*sizeof(int));
hipMalloc((void **)(&dev_b), N*N*sizeof(int));
hipMalloc((void **)(&dev_c), N*N*sizeof(int));
// copy array a,b (system memory) to dev_a, dev_b (device memory)
hipMemcpy(dev_a,a,N*N*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b,b,N*N*sizeof(int), hipMemcpyHostToDevice);
// launch kernels
MatrixMulKernel<<<dimGrid, dimBlock>>>(dev_a, dev_b, dev_c, N);
hipDeviceSynchronize();
// copy results from GPU back to system memory
hipMemcpy(d, dev_c, N*N*sizeof(int), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
printf("GPU Multiplication of A * B:\n");
printMatrix(d, N);
// verify that CPU and GPU multiplication match
verifyMult(c, d, N);
// free system and device memory
free(a);
free(b);
free(c);
free(d);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return 0;
} | .text
.file "dunn_project1_mult.hip"
.globl _Z30__device_stub__MatrixMulKernelPiS_S_i # -- Begin function _Z30__device_stub__MatrixMulKernelPiS_S_i
.p2align 4, 0x90
.type _Z30__device_stub__MatrixMulKernelPiS_S_i,@function
_Z30__device_stub__MatrixMulKernelPiS_S_i: # @_Z30__device_stub__MatrixMulKernelPiS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z15MatrixMulKernelPiS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z30__device_stub__MatrixMulKernelPiS_S_i, .Lfunc_end0-_Z30__device_stub__MatrixMulKernelPiS_S_i
.cfi_endproc
# -- End function
.globl _Z15MatrixMulOnHostPiS_S_i # -- Begin function _Z15MatrixMulOnHostPiS_S_i
.p2align 4, 0x90
.type _Z15MatrixMulOnHostPiS_S_i,@function
_Z15MatrixMulOnHostPiS_S_i: # @_Z15MatrixMulOnHostPiS_S_i
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rdx, -8(%rsp) # 8-byte Spill
testl %ecx, %ecx
jle .LBB1_7
# %bb.1: # %.preheader28.lr.ph
movl %ecx, %eax
leaq (,%rax,4), %r8
xorl %edx, %edx
xorl %r10d, %r10d
.p2align 4, 0x90
.LBB1_2: # %.preheader28
# =>This Loop Header: Depth=1
# Child Loop BB1_3 Depth 2
# Child Loop BB1_4 Depth 3
movl %edx, %r11d
leaq (%rdi,%r11,4), %r11
movq %r10, %rbx
imulq %rax, %rbx
movq -8(%rsp), %r9 # 8-byte Reload
leaq (%r9,%rbx,4), %rbx
movq %rsi, %r14
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB1_3: # %.preheader
# Parent Loop BB1_2 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB1_4 Depth 3
xorl %r12d, %r12d
movq %r14, %r13
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB1_4: # Parent Loop BB1_2 Depth=1
# Parent Loop BB1_3 Depth=2
# => This Inner Loop Header: Depth=3
movl (%r13), %r9d
imull (%r11,%r12,4), %r9d
addl %r9d, %ebp
incq %r12
addq %r8, %r13
cmpq %r12, %rax
jne .LBB1_4
# %bb.5: # %._crit_edge
# in Loop: Header=BB1_3 Depth=2
movl %ebp, (%rbx,%r15,4)
incq %r15
addq $4, %r14
cmpq %rax, %r15
jne .LBB1_3
# %bb.6: # %._crit_edge32
# in Loop: Header=BB1_2 Depth=1
incq %r10
addl %ecx, %edx
cmpq %rax, %r10
jne .LBB1_2
.LBB1_7: # %._crit_edge34
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z15MatrixMulOnHostPiS_S_i, .Lfunc_end1-_Z15MatrixMulOnHostPiS_S_i
.cfi_endproc
# -- End function
.globl _Z11printMatrixPii # -- Begin function _Z11printMatrixPii
.p2align 4, 0x90
.type _Z11printMatrixPii,@function
_Z11printMatrixPii: # @_Z11printMatrixPii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
pushq %rax
.cfi_def_cfa_offset 64
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rdi, (%rsp) # 8-byte Spill
testl %esi, %esi
jle .LBB2_5
# %bb.1: # %.preheader.lr.ph
movl %esi, %ebx
movl %esi, %r15d
xorl %ebp, %ebp
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB2_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB2_3 Depth 2
movl %ebp, %eax
movq (%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %r13
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB2_3: # Parent Loop BB2_2 Depth=1
# => This Inner Loop Header: Depth=2
movl (%r13,%r14,4), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
incq %r14
cmpq %r14, %r15
jne .LBB2_3
# %bb.4: # %._crit_edge
# in Loop: Header=BB2_2 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r12
addl %ebx, %ebp
cmpq %r15, %r12
jne .LBB2_2
.LBB2_5: # %._crit_edge14
movl $10, %edi
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
jmp putchar@PLT # TAILCALL
.Lfunc_end2:
.size _Z11printMatrixPii, .Lfunc_end2-_Z11printMatrixPii
.cfi_endproc
# -- End function
.globl _Z10verifyMultPiS_i # -- Begin function _Z10verifyMultPiS_i
.p2align 4, 0x90
.type _Z10verifyMultPiS_i,@function
_Z10verifyMultPiS_i: # @_Z10verifyMultPiS_i
.cfi_startproc
# %bb.0:
movq %rdi, %rax
movl $.Lstr.1, %edi
testl %edx, %edx
setg %cl
jle puts@PLT # TAILCALL
# %bb.1: # %.preheader.lr.ph
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %edx, %edx
leaq 4(%rax), %r8
leaq (,%rdx,4), %r9
leaq 4(%rsi), %r10
leaq -1(%rdx), %r11
xorl %ebx, %ebx
jmp .LBB3_2
.p2align 4, 0x90
.LBB3_7: # %.critedge
# in Loop: Header=BB3_2 Depth=1
incq %rbx
cmpq %rdx, %rbx
setb %cl
addq %r9, %r8
addq %r9, %r10
cmpq %rdx, %rbx
je .LBB3_9
.LBB3_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB3_4 Depth 2
movq %rbx, %r14
imulq %rdx, %r14
movl (%rax,%r14,4), %ebp
cmpl (%rsi,%r14,4), %ebp
jne .LBB3_8
# %bb.3: # %.lr.ph.preheader
# in Loop: Header=BB3_2 Depth=1
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB3_4: # %.lr.ph
# Parent Loop BB3_2 Depth=1
# => This Inner Loop Header: Depth=2
cmpq %r14, %r11
je .LBB3_7
# %bb.5: # in Loop: Header=BB3_4 Depth=2
movl (%r8,%r14,4), %ebp
leaq 1(%r14), %r15
cmpl (%r10,%r14,4), %ebp
movq %r15, %r14
je .LBB3_4
# %bb.6: # %._crit_edge
# in Loop: Header=BB3_2 Depth=1
cmpq %rdx, %r15
jae .LBB3_7
.LBB3_8: # %.critedge44
movl $.Lstr, %eax
movl $.Lstr.1, %edi
testb $1, %cl
cmovneq %rax, %rdi
.LBB3_9:
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.cfi_restore %r15
.cfi_restore %rbp
jmp puts@PLT # TAILCALL
.Lfunc_end3:
.size _Z10verifyMultPiS_i, .Lfunc_end3-_Z10verifyMultPiS_i
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $152, %rsp
.cfi_def_cfa_offset 208
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $256, %edi # imm = 0x100
callq malloc
movq %rax, %rbx
movl $256, %edi # imm = 0x100
callq malloc
movq %rax, %r14
movl $256, %edi # imm = 0x100
callq malloc
movq %rax, %r15
movl $256, %edi # imm = 0x100
callq malloc
movq %rax, %r12
movl $1325, %ecx # imm = 0x52D
xorl %eax, %eax
movq %rbx, %rdx
movq %r14, %rsi
.p2align 4, 0x90
.LBB4_1: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB4_2 Depth 2
xorl %edi, %edi
.p2align 4, 0x90
.LBB4_2: # Parent Loop BB4_1 Depth=1
# => This Inner Loop Header: Depth=2
imull $3125, %ecx, %ecx # imm = 0xC35
movzwl %cx, %ecx
leal -32768(%rcx), %r8d
movslq %r8d, %r8
imulq $-1610366953, %r8, %r8 # imm = 0xA003C017
shrq $32, %r8
addl %ecx, %r8d
addl $-32768, %r8d # imm = 0x8000
movl %r8d, %r9d
shrl $31, %r9d
sarl $12, %r8d
addl %r9d, %r8d
movl %r8d, (%rdx,%rdi,4)
imulq $4294968, %rcx, %r8 # imm = 0x418938
shrq $32, %r8
imull $1000, %r8d, %r8d # imm = 0x3E8
movl %ecx, %r9d
subl %r8d, %r9d
movl %r9d, (%rsi,%rdi,4)
incq %rdi
cmpq $8, %rdi
jne .LBB4_2
# %bb.3: # in Loop: Header=BB4_1 Depth=1
incq %rax
addq $32, %rsi
addq $32, %rdx
cmpq $8, %rax
jne .LBB4_1
# %bb.4: # %.preheader28.i.preheader
xorl %eax, %eax
movq %rbx, %rcx
.p2align 4, 0x90
.LBB4_5: # %.preheader28.i
# =>This Loop Header: Depth=1
# Child Loop BB4_6 Depth 2
# Child Loop BB4_7 Depth 3
movq %rax, %rdx
shlq $5, %rdx
addq %r15, %rdx
movq %r14, %rsi
xorl %edi, %edi
.p2align 4, 0x90
.LBB4_6: # %.preheader.i
# Parent Loop BB4_5 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB4_7 Depth 3
xorl %r8d, %r8d
xorl %r9d, %r9d
.p2align 4, 0x90
.LBB4_7: # Parent Loop BB4_5 Depth=1
# Parent Loop BB4_6 Depth=2
# => This Inner Loop Header: Depth=3
movl (%rsi,%r8,8), %r10d
imull (%rcx,%r8), %r10d
addl %r10d, %r9d
addq $4, %r8
cmpq $32, %r8
jne .LBB4_7
# %bb.8: # %._crit_edge.i
# in Loop: Header=BB4_6 Depth=2
movl %r9d, (%rdx,%rdi,4)
incq %rdi
addq $4, %rsi
cmpq $8, %rdi
jne .LBB4_6
# %bb.9: # %._crit_edge32.i
# in Loop: Header=BB4_5 Depth=1
incq %rax
addq $32, %rcx
cmpq $8, %rax
jne .LBB4_5
# %bb.10: # %_Z15MatrixMulOnHostPiS_S_i.exit
movl $.Lstr.2, %edi
callq puts@PLT
movq %rbx, 32(%rsp) # 8-byte Spill
movq %rbx, %r13
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB4_11: # %.preheader.i47
# =>This Loop Header: Depth=1
# Child Loop BB4_12 Depth 2
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB4_12: # Parent Loop BB4_11 Depth=1
# => This Inner Loop Header: Depth=2
movl (%r13,%rbx,4), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
incq %rbx
cmpq $8, %rbx
jne .LBB4_12
# %bb.13: # %._crit_edge.i51
# in Loop: Header=BB4_11 Depth=1
movl $10, %edi
callq putchar@PLT
incq %rbp
addq $32, %r13
cmpq $8, %rbp
jne .LBB4_11
# %bb.14: # %_Z11printMatrixPii.exit
movl $10, %edi
callq putchar@PLT
movl $.Lstr.3, %edi
callq puts@PLT
movq %r14, %r13
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB4_15: # %.preheader.i52
# =>This Loop Header: Depth=1
# Child Loop BB4_16 Depth 2
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB4_16: # Parent Loop BB4_15 Depth=1
# => This Inner Loop Header: Depth=2
movl (%r13,%rbx,4), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
incq %rbx
cmpq $8, %rbx
jne .LBB4_16
# %bb.17: # %._crit_edge.i57
# in Loop: Header=BB4_15 Depth=1
movl $10, %edi
callq putchar@PLT
incq %rbp
addq $32, %r13
cmpq $8, %rbp
jne .LBB4_15
# %bb.18: # %_Z11printMatrixPii.exit62
movl $10, %edi
callq putchar@PLT
movl $.Lstr.4, %edi
callq puts@PLT
movq %r15, %r13
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB4_19: # %.preheader.i63
# =>This Loop Header: Depth=1
# Child Loop BB4_20 Depth 2
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB4_20: # Parent Loop BB4_19 Depth=1
# => This Inner Loop Header: Depth=2
movl (%r13,%rbx,4), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
incq %rbx
cmpq $8, %rbx
jne .LBB4_20
# %bb.21: # %._crit_edge.i68
# in Loop: Header=BB4_19 Depth=1
movl $10, %edi
callq putchar@PLT
incq %rbp
addq $32, %r13
cmpq $8, %rbp
jne .LBB4_19
# %bb.22: # %_Z11printMatrixPii.exit73
movl $10, %edi
callq putchar@PLT
movl $.L.str.7, %edi
movl $2, %esi
xorl %eax, %eax
callq printf
movl $.Lstr.5, %edi
callq puts@PLT
leaq 16(%rsp), %rdi
movl $256, %esi # imm = 0x100
callq hipMalloc
leaq 8(%rsp), %rdi
movl $256, %esi # imm = 0x100
callq hipMalloc
movq %rsp, %rdi
movl $256, %esi # imm = 0x100
callq hipMalloc
movq 16(%rsp), %rdi
movl $256, %edx # imm = 0x100
movq 32(%rsp), %rsi # 8-byte Reload
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
movl $256, %edx # imm = 0x100
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movabsq $8589934594, %rdi # imm = 0x200000002
movabsq $17179869188, %rdx # imm = 0x400000004
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_24
# %bb.23:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq (%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl $8, 28(%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 28(%rsp), %rax
movq %rax, 136(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z15MatrixMulKernelPiS_S_i, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_24:
callq hipDeviceSynchronize
movq (%rsp), %rsi
movl $256, %edx # imm = 0x100
movq %r12, %rdi
movl $2, %ecx
callq hipMemcpy
callq hipDeviceSynchronize
movl $.Lstr.6, %edi
callq puts@PLT
movq %r12, %r13
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB4_25: # %.preheader.i74
# =>This Loop Header: Depth=1
# Child Loop BB4_26 Depth 2
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB4_26: # Parent Loop BB4_25 Depth=1
# => This Inner Loop Header: Depth=2
movl (%r13,%rbx,4), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
incq %rbx
cmpq $8, %rbx
jne .LBB4_26
# %bb.27: # %._crit_edge.i79
# in Loop: Header=BB4_25 Depth=1
movl $10, %edi
callq putchar@PLT
incq %rbp
addq $32, %r13
cmpq $8, %rbp
jne .LBB4_25
# %bb.28: # %_Z11printMatrixPii.exit84
movl $10, %edi
callq putchar@PLT
movq %r12, %rax
addq $4, %rax
movq %r15, %rcx
addq $4, %rcx
movb $1, %dl
xorl %esi, %esi
movq 32(%rsp), %rbx # 8-byte Reload
.LBB4_29: # %.preheader.i85
# =>This Loop Header: Depth=1
# Child Loop BB4_31 Depth 2
movq %rsi, %rdi
shlq $5, %rdi
movl (%r15,%rdi), %r8d
cmpl (%r12,%rdi), %r8d
jne .LBB4_33
# %bb.30: # %.lr.ph.preheader
# in Loop: Header=BB4_29 Depth=1
xorl %edi, %edi
.p2align 4, 0x90
.LBB4_31: # %.lr.ph
# Parent Loop BB4_29 Depth=1
# => This Inner Loop Header: Depth=2
cmpq $7, %rdi
je .LBB4_34
# %bb.32: # in Loop: Header=BB4_31 Depth=2
movl (%rcx,%rdi,4), %r8d
leaq 1(%rdi), %r9
cmpl (%rax,%rdi,4), %r8d
movq %r9, %rdi
je .LBB4_31
jmp .LBB4_33
.p2align 4, 0x90
.LBB4_34: # %.critedge.i
# in Loop: Header=BB4_29 Depth=1
cmpq $7, %rsi
leaq 1(%rsi), %rdi
setb %dl
addq $32, %rax
addq $32, %rcx
movq %rdi, %rsi
cmpq $8, %rdi
jne .LBB4_29
# %bb.35:
movl $.Lstr.1, %edi
jmp .LBB4_36
.LBB4_33: # %.critedge
movl $.Lstr, %eax
movl $.Lstr.1, %edi
testb $1, %dl
cmovneq %rax, %rdi
.LBB4_36: # %_Z10verifyMultPiS_i.exit
callq puts@PLT
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
movq %r12, %rdi
callq free
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $152, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end4:
.size main, .Lfunc_end4-main
.cfi_endproc
# -- End function
        .p2align        4, 0x90                         # -- Begin function __hip_module_ctor
        .type   __hip_module_ctor,@function
# Compiler-generated module constructor (runs from .init_array before main):
# registers the embedded GPU fat binary with the HIP runtime exactly once,
# registers the MatrixMulKernel symbol, and installs the matching destructor
# via atexit.
__hip_module_ctor:                      # @__hip_module_ctor
        .cfi_startproc
# %bb.0:
        subq    $40, %rsp
        .cfi_def_cfa_offset 48
        # One-time guard: only register the fat binary if the cached handle
        # is still null.
        cmpq    $0, __hip_gpubin_handle(%rip)
        jne     .LBB5_2
# %bb.1:
        movl    $__hip_fatbin_wrapper, %edi
        callq   __hipRegisterFatBinary
        movq    %rax, __hip_gpubin_handle(%rip)
.LBB5_2:
        # Register the kernel: handle, host stub address, and the mangled
        # kernel name (twice), with default limits (-1, null extras).
        movq    __hip_gpubin_handle(%rip), %rdi
        xorps   %xmm0, %xmm0
        movups  %xmm0, 16(%rsp)
        movups  %xmm0, (%rsp)
        movl    $_Z15MatrixMulKernelPiS_S_i, %esi
        movl    $.L__unnamed_1, %edx
        movl    $.L__unnamed_1, %ecx
        movl    $-1, %r8d
        xorl    %r9d, %r9d
        callq   __hipRegisterFunction
        # Tail-call atexit(__hip_module_dtor) so the fat binary is
        # unregistered at program exit.
        movl    $__hip_module_dtor, %edi
        addq    $40, %rsp
        .cfi_def_cfa_offset 8
        jmp     atexit                          # TAILCALL
.Lfunc_end5:
        .size   __hip_module_ctor, .Lfunc_end5-__hip_module_ctor
        .cfi_endproc
                                        # -- End function
        .p2align        4, 0x90                         # -- Begin function __hip_module_dtor
        .type   __hip_module_dtor,@function
# Compiler-generated module destructor (registered with atexit by the ctor):
# unregisters the GPU fat binary if it was registered and clears the cached
# handle so the teardown is idempotent.
__hip_module_dtor:                      # @__hip_module_dtor
        .cfi_startproc
# %bb.0:
        # Skip teardown entirely when the handle was never set (null).
        movq    __hip_gpubin_handle(%rip), %rdi
        testq   %rdi, %rdi
        je      .LBB6_2
# %bb.1:
        pushq   %rax
        .cfi_def_cfa_offset 16
        callq   __hipUnregisterFatBinary
        # Null the handle so a second invocation is a no-op.
        movq    $0, __hip_gpubin_handle(%rip)
        addq    $8, %rsp
        .cfi_def_cfa_offset 8
.LBB6_2:
        retq
.Lfunc_end6:
        .size   __hip_module_dtor, .Lfunc_end6-__hip_module_dtor
        .cfi_endproc
                                        # -- End function
.type _Z15MatrixMulKernelPiS_S_i,@object # @_Z15MatrixMulKernelPiS_S_i
.section .rodata,"a",@progbits
.globl _Z15MatrixMulKernelPiS_S_i
.p2align 3, 0x0
_Z15MatrixMulKernelPiS_S_i:
.quad _Z30__device_stub__MatrixMulKernelPiS_S_i
.size _Z15MatrixMulKernelPiS_S_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%d "
.size .L.str, 4
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "Thread Block Count: %d\n"
.size .L.str.7, 24
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z15MatrixMulKernelPiS_S_i"
.size .L__unnamed_1, 27
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "TEST FAILED!!!"
.size .Lstr, 15
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "TEST PASSED!!!"
.size .Lstr.1, 15
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "Matrix A:"
.size .Lstr.2, 10
.type .Lstr.3,@object # @str.3
.Lstr.3:
.asciz "\nMatrix B:"
.size .Lstr.3, 11
.type .Lstr.4,@object # @str.4
.Lstr.4:
.asciz "\nCPU Multiplication of A * B:"
.size .Lstr.4, 30
.type .Lstr.5,@object # @str.5
.Lstr.5:
.asciz "Starting GPU Computations\n"
.size .Lstr.5, 27
.type .Lstr.6,@object # @str.6
.Lstr.6:
.asciz "GPU Multiplication of A * B:"
.size .Lstr.6, 29
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z30__device_stub__MatrixMulKernelPiS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z15MatrixMulKernelPiS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z15MatrixMulKernelPiS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e280000002100 */
/*0030*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */
/* 0x000e680000002600 */
/*0040*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e620000002200 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0205 */
/*0060*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x178], PT ; /* 0x00005e0000007a0c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R3, R3, c[0x0][0x4], R2 ; /* 0x0000010003037a24 */
/* 0x002fca00078e0202 */
/*0080*/ ISETP.GE.OR P0, PT, R3, c[0x0][0x178], P0 ; /* 0x00005e0003007a0c */
/* 0x000fda0000706670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ MOV R2, c[0x0][0x178] ; /* 0x00005e0000027a02 */
/* 0x000fe20000000f00 */
/*00b0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*00c0*/ HFMA2.MMA R28, -RZ, RZ, 0, 0 ; /* 0x00000000ff1c7435 */
/* 0x000fe200000001ff */
/*00d0*/ IMAD R3, R3, c[0x0][0x178], RZ ; /* 0x00005e0003037a24 */
/* 0x000fe200078e02ff */
/*00e0*/ ISETP.GE.AND P0, PT, R2, 0x1, PT ; /* 0x000000010200780c */
/* 0x000fda0003f06270 */
/*00f0*/ @!P0 BRA 0xbf0 ; /* 0x00000af000008947 */
/* 0x000fea0003800000 */
/*0100*/ IADD3 R4, R2.reuse, -0x1, RZ ; /* 0xffffffff02047810 */
/* 0x040fe40007ffe0ff */
/*0110*/ LOP3.LUT R5, R2, 0x3, RZ, 0xc0, !PT ; /* 0x0000000302057812 */
/* 0x000fe400078ec0ff */
/*0120*/ ISETP.GE.U32.AND P0, PT, R4, 0x3, PT ; /* 0x000000030400780c */
/* 0x000fe40003f06070 */
/*0130*/ MOV R4, RZ ; /* 0x000000ff00047202 */
/* 0x000fe40000000f00 */
/*0140*/ MOV R28, RZ ; /* 0x000000ff001c7202 */
/* 0x000fd20000000f00 */
/*0150*/ @!P0 BRA 0xaf0 ; /* 0x0000099000008947 */
/* 0x000fea0003800000 */
/*0160*/ IADD3 R6, -R5, c[0x0][0x178], RZ ; /* 0x00005e0005067a10 */
/* 0x000fe20007ffe1ff */
/*0170*/ HFMA2.MMA R25, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff197435 */
/* 0x000fe200000001ff */
/*0180*/ ULDC.64 UR6, c[0x0][0x160] ; /* 0x0000580000067ab9 */
/* 0x000fe20000000a00 */
/*0190*/ MOV R4, RZ ; /* 0x000000ff00047202 */
/* 0x000fe40000000f00 */
/*01a0*/ ISETP.GT.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fcc0003f04270 */
/*01b0*/ IMAD.WIDE R24, R0, R25, c[0x0][0x168] ; /* 0x00005a0000187625 */
/* 0x000fce00078e0219 */
/*01c0*/ @!P0 BRA 0x960 ; /* 0x0000079000008947 */
/* 0x000fea0003800000 */
/*01d0*/ ISETP.GT.AND P1, PT, R6, 0xc, PT ; /* 0x0000000c0600780c */
/* 0x000fe40003f24270 */
/*01e0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fd60003f0f070 */
/*01f0*/ @!P1 BRA 0x6a0 ; /* 0x000004a000009947 */
/* 0x000fea0003800000 */
/*0200*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0210*/ MOV R12, UR6 ; /* 0x00000006000c7c02 */
/* 0x000fe20008000f00 */
/*0220*/ LDG.E R29, [R24.64] ; /* 0x00000004181d7981 */
/* 0x0000a2000c1e1900 */
/*0230*/ MOV R13, UR7 ; /* 0x00000007000d7c02 */
/* 0x000fca0008000f00 */
/*0240*/ IMAD.WIDE R12, R3, 0x4, R12 ; /* 0x00000004030c7825 */
/* 0x000fca00078e020c */
/*0250*/ LDG.E R27, [R12.64] ; /* 0x000000040c1b7981 */
/* 0x000ea2000c1e1900 */
/*0260*/ IMAD.WIDE R10, R2, 0x4, R24 ; /* 0x00000004020a7825 */
/* 0x000fc600078e0218 */
/*0270*/ LDG.E R17, [R12.64+0x4] ; /* 0x000004040c117981 */
/* 0x000ee6000c1e1900 */
/*0280*/ IMAD.WIDE R18, R2.reuse, 0x4, R10 ; /* 0x0000000402127825 */
/* 0x040fe200078e020a */
/*0290*/ LDG.E R16, [R10.64] ; /* 0x000000040a107981 */
/* 0x0002e8000c1e1900 */
/*02a0*/ LDG.E R7, [R12.64+0xc] ; /* 0x00000c040c077981 */
/* 0x000f22000c1e1900 */
/*02b0*/ IMAD.WIDE R14, R2, 0x4, R18 ; /* 0x00000004020e7825 */
/* 0x000fc600078e0212 */
/*02c0*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */
/* 0x000b26000c1e1900 */
/*02d0*/ IMAD.WIDE R20, R2.reuse, 0x4, R14 ; /* 0x0000000402147825 */
/* 0x040fe200078e020e */
/*02e0*/ LDG.E R26, [R14.64] ; /* 0x000000040e1a7981 */
/* 0x000128000c1e1900 */
/*02f0*/ LDG.E R9, [R12.64+0x10] ; /* 0x000010040c097981 */
/* 0x000f28000c1e1900 */
/*0300*/ LDG.E R19, [R12.64+0x8] ; /* 0x000008040c137981 */
/* 0x020f22000c1e1900 */
/*0310*/ IMAD.WIDE R14, R2, 0x4, R20 ; /* 0x00000004020e7825 */
/* 0x001fc600078e0214 */
/*0320*/ LDG.E R20, [R20.64] ; /* 0x0000000414147981 */
/* 0x000166000c1e1900 */
/*0330*/ IMAD.WIDE R22, R2.reuse, 0x4, R14 ; /* 0x0000000402167825 */
/* 0x040fe200078e020e */
/*0340*/ LDG.E R8, [R14.64] ; /* 0x000000040e087981 */
/* 0x000168000c1e1900 */
/*0350*/ LDG.E R11, [R12.64+0x14] ; /* 0x000014040c0b7981 */
/* 0x002f62000c1e1900 */
/*0360*/ IMAD.WIDE R24, R2, 0x4, R22 ; /* 0x0000000402187825 */
/* 0x000fc600078e0216 */
/*0370*/ LDG.E R10, [R22.64] ; /* 0x00000004160a7981 */
/* 0x000368000c1e1900 */
/*0380*/ LDG.E R21, [R12.64+0x18] ; /* 0x000018040c157981 */
/* 0x001f62000c1e1900 */
/*0390*/ IMAD R29, R29, R27, R28 ; /* 0x0000001b1d1d7224 */
/* 0x004fc600078e021c */
/*03a0*/ LDG.E R27, [R12.64+0x1c] ; /* 0x00001c040c1b7981 */
/* 0x000ea8000c1e1900 */
/*03b0*/ LDG.E R28, [R24.64] ; /* 0x00000004181c7981 */
/* 0x0000a2000c1e1900 */
/*03c0*/ IMAD.WIDE R14, R2, 0x4, R24 ; /* 0x00000004020e7825 */
/* 0x000fc800078e0218 */
/*03d0*/ IMAD R29, R16, R17, R29 ; /* 0x00000011101d7224 */
/* 0x008fe400078e021d */
/*03e0*/ IMAD.WIDE R16, R2, 0x4, R14 ; /* 0x0000000402107825 */
/* 0x000fe400078e020e */
/*03f0*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x0006a4000c1e1900 */
/*0400*/ IMAD R29, R18, R19, R29 ; /* 0x00000013121d7224 */
/* 0x010fe400078e021d */
/*0410*/ IMAD.WIDE R18, R2, 0x4, R16 ; /* 0x0000000402127825 */
/* 0x000fe400078e0210 */
/*0420*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x0008a4000c1e1900 */
/*0430*/ IMAD R26, R26, R7, R29 ; /* 0x000000071a1a7224 */
/* 0x000fc400078e021d */
/*0440*/ IMAD.WIDE R22, R2.reuse, 0x4, R18 ; /* 0x0000000402167825 */
/* 0x042fe200078e0212 */
/*0450*/ LDG.E R7, [R12.64+0x20] ; /* 0x000020040c077981 */
/* 0x000ea8000c1e1900 */
/*0460*/ LDG.E R29, [R12.64+0x24] ; /* 0x000024040c1d7981 */
/* 0x000ea2000c1e1900 */
/*0470*/ IMAD.WIDE R24, R2, 0x4, R22 ; /* 0x0000000402187825 */
/* 0x001fc600078e0216 */
/*0480*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */
/* 0x0000a2000c1e1900 */
/*0490*/ IMAD R9, R20, R9, R26 ; /* 0x0000000914097224 */
/* 0x020fc600078e021a */
/*04a0*/ LDG.E R26, [R12.64+0x28] ; /* 0x000028040c1a7981 */
/* 0x000f62000c1e1900 */
/*04b0*/ IMAD R11, R8, R11, R9 ; /* 0x0000000b080b7224 */
/* 0x000fe400078e0209 */
/*04c0*/ IMAD.WIDE R8, R2, 0x4, R24 ; /* 0x0000000402087825 */
/* 0x000fe200078e0218 */
/*04d0*/ LDG.E R22, [R22.64] ; /* 0x0000000416167981 */
/* 0x000368000c1e1900 */
/*04e0*/ LDG.E R17, [R12.64+0x2c] ; /* 0x00002c040c117981 */
/* 0x010f22000c1e1900 */
/*04f0*/ IMAD R21, R10, R21, R11 ; /* 0x000000150a157224 */
/* 0x000fc600078e020b */
/*0500*/ LDG.E R15, [R24.64] ; /* 0x00000004180f7981 */
/* 0x008722000c1e1900 */
/*0510*/ IMAD.WIDE R10, R2, 0x4, R8 ; /* 0x00000004020a7825 */
/* 0x000fc600078e0208 */
/*0520*/ LDG.E R19, [R8.64] ; /* 0x0000000408137981 */
/* 0x001128000c1e1900 */
/*0530*/ LDG.E R23, [R10.64] ; /* 0x000000040a177981 */
/* 0x002f28000c1e1900 */
/*0540*/ LDG.E R24, [R12.64+0x30] ; /* 0x000030040c187981 */
/* 0x008ee8000c1e1900 */
/*0550*/ LDG.E R25, [R12.64+0x38] ; /* 0x000038040c197981 */
/* 0x000ee8000c1e1900 */
/*0560*/ LDG.E R8, [R12.64+0x3c] ; /* 0x00003c040c087981 */
/* 0x001ee2000c1e1900 */
/*0570*/ IMAD R9, R28, R27, R21 ; /* 0x0000001b1c097224 */
/* 0x004fc600078e0215 */
/*0580*/ LDG.E R28, [R12.64+0x34] ; /* 0x000034040c1c7981 */
/* 0x000ea2000c1e1900 */
/*0590*/ IMAD.WIDE R20, R2, 0x4, R10 ; /* 0x0000000402147825 */
/* 0x000fca00078e020a */
/*05a0*/ LDG.E R27, [R20.64] ; /* 0x00000004141b7981 */
/* 0x000ea2000c1e1900 */
/*05b0*/ IADD3 R6, R6, -0x10, RZ ; /* 0xfffffff006067810 */
/* 0x000fc80007ffe0ff */
/*05c0*/ ISETP.GT.AND P1, PT, R6, 0xc, PT ; /* 0x0000000c0600780c */
/* 0x000fe20003f24270 */
/*05d0*/ IMAD R7, R14, R7, R9 ; /* 0x000000070e077224 */
/* 0x000fc800078e0209 */
/*05e0*/ IMAD R7, R16, R29, R7 ; /* 0x0000001d10077224 */
/* 0x000fc800078e0207 */
/*05f0*/ IMAD R7, R18, R26, R7 ; /* 0x0000001a12077224 */
/* 0x020fc800078e0207 */
/*0600*/ IMAD R7, R22, R17, R7 ; /* 0x0000001116077224 */
/* 0x010fe200078e0207 */
/*0610*/ UIADD3 UR6, UP0, UR6, 0x40, URZ ; /* 0x0000004006067890 */
/* 0x000fe2000ff1e03f */
/*0620*/ IADD3 R4, R4, 0x10, RZ ; /* 0x0000001004047810 */
/* 0x000fc60007ffe0ff */
/*0630*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*0640*/ IMAD R7, R15, R24, R7 ; /* 0x000000180f077224 */
/* 0x008fc800078e0207 */
/*0650*/ IMAD R28, R19, R28, R7 ; /* 0x0000001c131c7224 */
/* 0x004fc800078e0207 */
/*0660*/ IMAD R28, R23, R25, R28 ; /* 0x00000019171c7224 */
/* 0x000fe400078e021c */
/*0670*/ IMAD.WIDE R24, R2, 0x4, R20 ; /* 0x0000000402187825 */
/* 0x000fc800078e0214 */
/*0680*/ IMAD R28, R27, R8, R28 ; /* 0x000000081b1c7224 */
/* 0x000fe200078e021c */
/*0690*/ @P1 BRA 0x210 ; /* 0xfffffb7000001947 */
/* 0x000fea000383ffff */
/*06a0*/ ISETP.GT.AND P1, PT, R6, 0x4, PT ; /* 0x000000040600780c */
/* 0x000fda0003f24270 */
/*06b0*/ @!P1 BRA 0x940 ; /* 0x0000028000009947 */
/* 0x000fea0003800000 */
/*06c0*/ IMAD.WIDE R16, R2, 0x4, R24 ; /* 0x0000000402107825 */
/* 0x000fe200078e0218 */
/*06d0*/ MOV R8, UR6 ; /* 0x0000000600087c02 */
/* 0x000fe20008000f00 */
/*06e0*/ LDG.E R7, [R24.64] ; /* 0x0000000418077981 */
/* 0x0000a2000c1e1900 */
/*06f0*/ MOV R9, UR7 ; /* 0x0000000700097c02 */
/* 0x000fc60008000f00 */
/*0700*/ IMAD.WIDE R12, R2.reuse, 0x4, R16 ; /* 0x00000004020c7825 */
/* 0x040fe200078e0210 */
/*0710*/ LDG.E R21, [R16.64] ; /* 0x0000000410157981 */
/* 0x0002e6000c1e1900 */
/*0720*/ IMAD.WIDE R8, R3, 0x4, R8 ; /* 0x0000000403087825 */
/* 0x000fe200078e0208 */
/*0730*/ LDG.E R23, [R12.64] ; /* 0x000000040c177981 */
/* 0x000966000c1e1900 */
/*0740*/ IMAD.WIDE R14, R2.reuse, 0x4, R12 ; /* 0x00000004020e7825 */
/* 0x040fe200078e020c */
/*0750*/ LDG.E R20, [R8.64] ; /* 0x0000000408147981 */
/* 0x000ea8000c1e1900 */
/*0760*/ LDG.E R22, [R8.64+0x4] ; /* 0x0000040408167981 */
/* 0x000ee2000c1e1900 */
/*0770*/ IMAD.WIDE R10, R2, 0x4, R14 ; /* 0x00000004020a7825 */
/* 0x000fc600078e020e */
/*0780*/ LDG.E R26, [R8.64+0x8] ; /* 0x00000804081a7981 */
/* 0x000f66000c1e1900 */
/*0790*/ IMAD.WIDE R16, R2.reuse, 0x4, R10 ; /* 0x0000000402107825 */
/* 0x042fe200078e020a */
/*07a0*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000368000c1e1900 */
/*07b0*/ LDG.E R27, [R8.64+0xc] ; /* 0x00000c04081b7981 */
/* 0x000f62000c1e1900 */
/*07c0*/ IMAD.WIDE R18, R2, 0x4, R16 ; /* 0x0000000402127825 */
/* 0x000fc600078e0210 */
/*07d0*/ LDG.E R10, [R10.64] ; /* 0x000000040a0a7981 */
/* 0x000368000c1e1900 */
/*07e0*/ LDG.E R25, [R8.64+0x10] ; /* 0x0000100408197981 */
/* 0x001f62000c1e1900 */
/*07f0*/ IMAD.WIDE R12, R2, 0x4, R18 ; /* 0x00000004020c7825 */
/* 0x010fc600078e0212 */
/*0800*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x000f28000c1e1900 */
/*0810*/ LDG.E R29, [R8.64+0x14] ; /* 0x00001404081d7981 */
/* 0x000f28000c1e1900 */
/*0820*/ LDG.E R24, [R18.64] ; /* 0x0000000412187981 */
/* 0x000128000c1e1900 */
/*0830*/ LDG.E R11, [R8.64+0x18] ; /* 0x00001804080b7981 */
/* 0x002f28000c1e1900 */
/*0840*/ LDG.E R15, [R12.64] ; /* 0x000000040c0f7981 */
/* 0x000f28000c1e1900 */
/*0850*/ LDG.E R18, [R8.64+0x1c] ; /* 0x00001c0408127981 */
/* 0x001f22000c1e1900 */
/*0860*/ UIADD3 UR6, UP0, UR6, 0x20, URZ ; /* 0x0000002006067890 */
/* 0x000fe2000ff1e03f */
/*0870*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fc40003f0e170 */
/*0880*/ IADD3 R4, R4, 0x8, RZ ; /* 0x0000000804047810 */
/* 0x000fe40007ffe0ff */
/*0890*/ IADD3 R6, R6, -0x8, RZ ; /* 0xfffffff806067810 */
/* 0x000fe20007ffe0ff */
/*08a0*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*08b0*/ IMAD R7, R7, R20, R28 ; /* 0x0000001407077224 */
/* 0x004fc800078e021c */
/*08c0*/ IMAD R7, R21, R22, R7 ; /* 0x0000001615077224 */
/* 0x008fc800078e0207 */
/*08d0*/ IMAD R7, R23, R26, R7 ; /* 0x0000001a17077224 */
/* 0x020fc800078e0207 */
/*08e0*/ IMAD R7, R14, R27, R7 ; /* 0x0000001b0e077224 */
/* 0x000fc800078e0207 */
/*08f0*/ IMAD R7, R10, R25, R7 ; /* 0x000000190a077224 */
/* 0x000fc800078e0207 */
/*0900*/ IMAD R7, R16, R29, R7 ; /* 0x0000001d10077224 */
/* 0x010fc800078e0207 */
/*0910*/ IMAD R7, R24, R11, R7 ; /* 0x0000000b18077224 */
/* 0x000fe400078e0207 */
/*0920*/ IMAD.WIDE R24, R2, 0x4, R12 ; /* 0x0000000402187825 */
/* 0x000fc800078e020c */
/*0930*/ IMAD R28, R15, R18, R7 ; /* 0x000000120f1c7224 */
/* 0x000fe400078e0207 */
/*0940*/ ISETP.NE.OR P0, PT, R6, RZ, P0 ; /* 0x000000ff0600720c */
/* 0x000fda0000705670 */
/*0950*/ @!P0 BRA 0xaf0 ; /* 0x0000019000008947 */
/* 0x000fea0003800000 */
/*0960*/ MOV R8, UR6 ; /* 0x0000000600087c02 */
/* 0x000fe20008000f00 */
/*0970*/ IMAD.WIDE R14, R2, 0x4, R24 ; /* 0x00000004020e7825 */
/* 0x000fe200078e0218 */
/*0980*/ MOV R9, UR7 ; /* 0x0000000700097c02 */
/* 0x000fe20008000f00 */
/*0990*/ LDG.E R25, [R24.64] ; /* 0x0000000418197981 */
/* 0x000ea8000c1e1900 */
/*09a0*/ IMAD.WIDE R8, R3, 0x4, R8 ; /* 0x0000000403087825 */
/* 0x000fc800078e0208 */
/*09b0*/ IMAD.WIDE R12, R2.reuse, 0x4, R14 ; /* 0x00000004020c7825 */
/* 0x040fe200078e020e */
/*09c0*/ LDG.E R7, [R8.64] ; /* 0x0000000408077981 */
/* 0x000ea8000c1e1900 */
/*09d0*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000ee2000c1e1900 */
/*09e0*/ IMAD.WIDE R10, R2, 0x4, R12 ; /* 0x00000004020a7825 */
/* 0x000fc600078e020c */
/*09f0*/ LDG.E R16, [R8.64+0x4] ; /* 0x0000040408107981 */
/* 0x000ee8000c1e1900 */
/*0a00*/ LDG.E R18, [R12.64] ; /* 0x000000040c127981 */
/* 0x000f28000c1e1900 */
/*0a10*/ LDG.E R17, [R8.64+0x8] ; /* 0x0000080408117981 */
/* 0x000f28000c1e1900 */
/*0a20*/ LDG.E R19, [R8.64+0xc] ; /* 0x00000c0408137981 */
/* 0x000f68000c1e1900 */
/*0a30*/ LDG.E R20, [R10.64] ; /* 0x000000040a147981 */
/* 0x000f62000c1e1900 */
/*0a40*/ IADD3 R6, R6, -0x4, RZ ; /* 0xfffffffc06067810 */
/* 0x000fc80007ffe0ff */
/*0a50*/ ISETP.NE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fe20003f05270 */
/*0a60*/ UIADD3 UR6, UP0, UR6, 0x10, URZ ; /* 0x0000001006067890 */
/* 0x000fe2000ff1e03f */
/*0a70*/ IADD3 R4, R4, 0x4, RZ ; /* 0x0000000404047810 */
/* 0x000fc60007ffe0ff */
/*0a80*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*0a90*/ IMAD R7, R25, R7, R28 ; /* 0x0000000719077224 */
/* 0x004fc800078e021c */
/*0aa0*/ IMAD R7, R14, R16, R7 ; /* 0x000000100e077224 */
/* 0x008fe400078e0207 */
/*0ab0*/ IMAD.WIDE R24, R2, 0x4, R10 ; /* 0x0000000402187825 */
/* 0x000fc800078e020a */
/*0ac0*/ IMAD R7, R18, R17, R7 ; /* 0x0000001112077224 */
/* 0x010fc800078e0207 */
/*0ad0*/ IMAD R28, R20, R19, R7 ; /* 0x00000013141c7224 */
/* 0x020fe200078e0207 */
/*0ae0*/ @P0 BRA 0x960 ; /* 0xfffffe7000000947 */
/* 0x000fea000383ffff */
/*0af0*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fda0003f05270 */
/*0b00*/ @!P0 BRA 0xbf0 ; /* 0x000000e000008947 */
/* 0x000fea0003800000 */
/*0b10*/ HFMA2.MMA R9, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff097435 */
/* 0x000fe200000001ff */
/*0b20*/ IADD3 R6, R3, R4, RZ ; /* 0x0000000403067210 */
/* 0x000fe20007ffe0ff */
/*0b30*/ IMAD R4, R4, c[0x0][0x178], R0 ; /* 0x00005e0004047a24 */
/* 0x000fd000078e0200 */
/*0b40*/ IMAD.WIDE R6, R6, R9, c[0x0][0x160] ; /* 0x0000580006067625 */
/* 0x000fc800078e0209 */
/*0b50*/ IMAD.WIDE R8, R4, R9, c[0x0][0x168] ; /* 0x00005a0004087625 */
/* 0x000fca00078e0209 */
/*0b60*/ LDG.E R11, [R8.64] ; /* 0x00000004080b7981 */
/* 0x0000a8000c1e1900 */
/*0b70*/ LDG.E R4, [R6.64] ; /* 0x0000000406047981 */
/* 0x0002a2000c1e1900 */
/*0b80*/ IADD3 R5, R5, -0x1, RZ ; /* 0xffffffff05057810 */
/* 0x000fc80007ffe0ff */
/*0b90*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fe20003f05270 */
/*0ba0*/ IMAD.WIDE R8, R2, 0x4, R8 ; /* 0x0000000402087825 */
/* 0x001fe200078e0208 */
/*0bb0*/ IADD3 R6, P1, R6, 0x4, RZ ; /* 0x0000000406067810 */
/* 0x002fc80007f3e0ff */
/*0bc0*/ IADD3.X R7, RZ, R7, RZ, P1, !PT ; /* 0x00000007ff077210 */
/* 0x000fe20000ffe4ff */
/*0bd0*/ IMAD R28, R11, R4, R28 ; /* 0x000000040b1c7224 */
/* 0x004fcc00078e021c */
/*0be0*/ @P0 BRA 0xb60 ; /* 0xffffff7000000947 */
/* 0x000fea000383ffff */
/*0bf0*/ IADD3 R3, R0, R3, RZ ; /* 0x0000000300037210 */
/* 0x000fe40007ffe0ff */
/*0c00*/ MOV R2, 0x4 ; /* 0x0000000400027802 */
/* 0x000fca0000000f00 */
/*0c10*/ IMAD.WIDE R2, R3, R2, c[0x0][0x170] ; /* 0x00005c0003027625 */
/* 0x000fca00078e0202 */
/*0c20*/ STG.E [R2.64], R28 ; /* 0x0000001c02007986 */
/* 0x000fe2000c101904 */
/*0c30*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0c40*/ BRA 0xc40; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0c50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c80*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ca0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cb0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cc0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cd0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ce0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cf0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z15MatrixMulKernelPiS_S_i
.globl _Z15MatrixMulKernelPiS_S_i
.p2align 8
.type _Z15MatrixMulKernelPiS_S_i,@function
_Z15MatrixMulKernelPiS_S_i:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x2c
s_load_b32 s2, s[0:1], 0x18
v_bfe_u32 v2, v0, 10, 10
v_and_b32_e32 v3, 0x3ff, v0
s_waitcnt lgkmcnt(0)
s_lshr_b32 s4, s3, 16
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[0:1], null, s15, s4, v[2:3]
v_mad_u64_u32 v[1:2], null, s14, s3, v[3:4]
s_mov_b32 s3, exec_lo
v_max_i32_e32 v2, v0, v1
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s2, v2
s_cbranch_execz .LBB0_6
s_cmp_lt_i32 s2, 1
s_cbranch_scc1 .LBB0_4
s_load_b128 s[4:7], s[0:1], 0x0
v_mul_lo_u32 v2, v0, s2
s_mov_b32 s3, s2
v_mov_b32_e32 v5, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[3:4], 2, v[2:3]
v_mov_b32_e32 v2, 0
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v3, vcc_lo, s4, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo
.p2align 6
.LBB0_3:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_ashrrev_i32_e32 v6, 31, v5
s_add_i32 s3, s3, -1
s_cmp_eq_u32 s3, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[6:7], 2, v[5:6]
v_add_co_u32 v6, vcc_lo, s6, v6
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v7, vcc_lo, s7, v7, vcc_lo
global_load_b32 v8, v[3:4], off
global_load_b32 v9, v[6:7], off
s_waitcnt vmcnt(0)
v_mad_u64_u32 v[6:7], null, v9, v8, v[2:3]
v_add_co_u32 v3, vcc_lo, v3, 4
v_add_co_ci_u32_e32 v4, vcc_lo, 0, v4, vcc_lo
s_delay_alu instid0(VALU_DEP_3)
v_dual_mov_b32 v2, v6 :: v_dual_add_nc_u32 v5, s2, v5
s_cbranch_scc0 .LBB0_3
s_branch .LBB0_5
.LBB0_4:
v_mov_b32_e32 v2, 0
.LBB0_5:
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[3:4], null, v0, s2, v[1:2]
v_ashrrev_i32_e32 v4, 31, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[3:4]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB0_6:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z15MatrixMulKernelPiS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 10
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z15MatrixMulKernelPiS_S_i, .Lfunc_end0-_Z15MatrixMulKernelPiS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z15MatrixMulKernelPiS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z15MatrixMulKernelPiS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 10
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0004f016_00000000-6_dunn_project1_mult.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2063:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2063:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z15MatrixMulOnHostPiS_S_i
.type _Z15MatrixMulOnHostPiS_S_i, @function
_Z15MatrixMulOnHostPiS_S_i:
.LFB2057:
.cfi_startproc
endbr64
testl %ecx, %ecx
jle .L11
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
movq %rdi, %r8
movq %rsi, %rbp
movq %rdx, %r11
movl %ecx, %r13d
movslq %ecx, %r12
leaq 0(,%r12,4), %rdi
movq %r8, %rbx
addq %rdi, %r8
movl $0, %r14d
.L5:
movq %rbp, %r10
movl $0, %r9d
.L8:
movq %r10, %rcx
movq %rbx, %rax
movl $0, %esi
.L6:
movl (%rax), %edx
imull (%rcx), %edx
addl %edx, %esi
addq $4, %rax
addq %rdi, %rcx
cmpq %r8, %rax
jne .L6
movl %esi, (%r11,%r9,4)
addq $1, %r9
addq $4, %r10
cmpq %r12, %r9
jne .L8
addl $1, %r14d
addq %rdi, %r11
addq %rdi, %rbx
addq %rdi, %r8
cmpl %r14d, %r13d
jne .L5
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
.cfi_restore 13
.cfi_restore 14
ret
.cfi_endproc
.LFE2057:
.size _Z15MatrixMulOnHostPiS_S_i, .-_Z15MatrixMulOnHostPiS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%d "
.LC1:
.string "\n"
.text
.globl _Z11printMatrixPii
.type _Z11printMatrixPii, @function
_Z11printMatrixPii:
.LFB2058:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $24, %rsp
.cfi_def_cfa_offset 80
movl %esi, 12(%rsp)
testl %esi, %esi
jle .L15
movslq %esi, %r14
leaq 0(,%r14,4), %r15
leaq (%rdi,%r15), %rbp
negq %r14
salq $2, %r14
movl $0, %r13d
leaq .LC0(%rip), %r12
.L16:
leaq 0(%rbp,%r14), %rbx
.L17:
movl (%rbx), %edx
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %rbp, %rbx
jne .L17
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %r13d
addq %r15, %rbp
cmpl %r13d, 12(%rsp)
jne .L16
.L15:
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2058:
.size _Z11printMatrixPii, .-_Z11printMatrixPii
.section .rodata.str1.1
.LC2:
.string "TEST FAILED!!!\n"
.LC3:
.string "TEST PASSED!!!\n"
.text
.globl _Z10verifyMultPiS_i
.type _Z10verifyMultPiS_i, @function
_Z10verifyMultPiS_i:
.LFB2059:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
testl %edx, %edx
jle .L21
movslq %edx, %rax
leaq 0(,%rax,4), %r10
negq %rax
leaq 0(,%rax,4), %r8
movq %r10, %rcx
movl $0, %r9d
.L22:
leaq (%rcx,%r8), %rax
.L25:
movl (%rsi,%rax), %r11d
cmpl %r11d, (%rdi,%rax)
jne .L28
addq $4, %rax
cmpq %rcx, %rax
jne .L25
addl $1, %r9d
addq %r10, %rcx
cmpl %r9d, %edx
jne .L22
.L21:
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L20
.L28:
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L20:
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _Z10verifyMultPiS_i, .-_Z10verifyMultPiS_i
.globl _Z40__device_stub__Z15MatrixMulKernelPiS_S_iPiS_S_i
.type _Z40__device_stub__Z15MatrixMulKernelPiS_S_iPiS_S_i, @function
_Z40__device_stub__Z15MatrixMulKernelPiS_S_iPiS_S_i:
.LFB2085:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L33
.L29:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L34
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L33:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z15MatrixMulKernelPiS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L29
.L34:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z40__device_stub__Z15MatrixMulKernelPiS_S_iPiS_S_i, .-_Z40__device_stub__Z15MatrixMulKernelPiS_S_iPiS_S_i
.globl _Z15MatrixMulKernelPiS_S_i
.type _Z15MatrixMulKernelPiS_S_i, @function
_Z15MatrixMulKernelPiS_S_i:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z40__device_stub__Z15MatrixMulKernelPiS_S_iPiS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z15MatrixMulKernelPiS_S_i, .-_Z15MatrixMulKernelPiS_S_i
.section .rodata.str1.1
.LC4:
.string "Matrix A:\n"
.LC5:
.string "\nMatrix B:\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC6:
.string "\nCPU Multiplication of A * B:\n"
.section .rodata.str1.1
.LC7:
.string "Thread Block Count: %d\n"
.LC8:
.string "Starting GPU Computations\n\n"
.LC9:
.string "GPU Multiplication of A * B:\n"
.text
.globl main
.type main, @function
main:
.LFB2060:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $72, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl $4, 32(%rsp)
movl $4, 36(%rsp)
movl $1, 40(%rsp)
movl $2, 44(%rsp)
movl $2, 48(%rsp)
movl $1, 52(%rsp)
movl $256, %edi
call malloc@PLT
movq %rax, %rbp
movl $256, %edi
call malloc@PLT
movq %rax, %rbx
movl $256, %edi
call malloc@PLT
movq %rax, %r13
movl $256, %edi
call malloc@PLT
movq %rax, %r12
movl $32, %edi
movl $1325, %edx
.L38:
leaq -32(%rdi), %rsi
.L39:
imull $3125, %edx, %edx
movl %edx, %eax
sarl $31, %eax
shrl $16, %eax
addl %eax, %edx
movzwl %dx, %edx
movl %edx, %ecx
subl %eax, %ecx
movl %ecx, %edx
leal -32768(%rcx), %r8d
movslq %r8d, %rax
imulq $-1610366953, %rax, %rax
shrq $32, %rax
addl %r8d, %eax
sarl $12, %eax
sarl $31, %r8d
subl %r8d, %eax
movl %eax, 0(%rbp,%rsi)
movslq %ecx, %rax
imulq $274877907, %rax, %rax
sarq $38, %rax
movl %ecx, %r8d
sarl $31, %r8d
subl %r8d, %eax
imull $1000, %eax, %eax
subl %eax, %ecx
movl %ecx, (%rbx,%rsi)
addq $4, %rsi
cmpq %rdi, %rsi
jne .L39
addq $32, %rdi
cmpq $288, %rdi
jne .L38
movl $8, %ecx
movq %r13, %rdx
movq %rbx, %rsi
movq %rbp, %rdi
call _Z15MatrixMulOnHostPiS_S_i
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $8, %esi
movq %rbp, %rdi
call _Z11printMatrixPii
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $8, %esi
movq %rbx, %rdi
call _Z11printMatrixPii
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $8, %esi
movq %r13, %rdi
call _Z11printMatrixPii
movl $2, %edx
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 8(%rsp), %rdi
movl $256, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $256, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $256, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $256, %edx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $256, %edx
movq %rbx, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl 40(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 32(%rsp), %rdx
movq 44(%rsp), %rdi
movl 52(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L45
.L41:
call cudaDeviceSynchronize@PLT
movl $2, %ecx
movl $256, %edx
movq 24(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
call cudaDeviceSynchronize@PLT
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $8, %esi
movq %r12, %rdi
call _Z11printMatrixPii
movl $8, %edx
movq %r12, %rsi
movq %r13, %rdi
call _Z10verifyMultPiS_i
movq %rbp, %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L46
movl $0, %eax
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L45:
.cfi_restore_state
movl $8, %ecx
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z40__device_stub__Z15MatrixMulKernelPiS_S_iPiS_S_i
jmp .L41
.L46:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2060:
.size main, .-main
.section .rodata.str1.1
.LC10:
.string "_Z15MatrixMulKernelPiS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2088:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _Z15MatrixMulKernelPiS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "dunn_project1_mult.hip"
.globl _Z30__device_stub__MatrixMulKernelPiS_S_i # -- Begin function _Z30__device_stub__MatrixMulKernelPiS_S_i
.p2align 4, 0x90
.type _Z30__device_stub__MatrixMulKernelPiS_S_i,@function
_Z30__device_stub__MatrixMulKernelPiS_S_i: # @_Z30__device_stub__MatrixMulKernelPiS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z15MatrixMulKernelPiS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z30__device_stub__MatrixMulKernelPiS_S_i, .Lfunc_end0-_Z30__device_stub__MatrixMulKernelPiS_S_i
.cfi_endproc
# -- End function
.globl _Z15MatrixMulOnHostPiS_S_i # -- Begin function _Z15MatrixMulOnHostPiS_S_i
.p2align 4, 0x90
.type _Z15MatrixMulOnHostPiS_S_i,@function
_Z15MatrixMulOnHostPiS_S_i: # @_Z15MatrixMulOnHostPiS_S_i
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rdx, -8(%rsp) # 8-byte Spill
testl %ecx, %ecx
jle .LBB1_7
# %bb.1: # %.preheader28.lr.ph
movl %ecx, %eax
leaq (,%rax,4), %r8
xorl %edx, %edx
xorl %r10d, %r10d
.p2align 4, 0x90
.LBB1_2: # %.preheader28
# =>This Loop Header: Depth=1
# Child Loop BB1_3 Depth 2
# Child Loop BB1_4 Depth 3
movl %edx, %r11d
leaq (%rdi,%r11,4), %r11
movq %r10, %rbx
imulq %rax, %rbx
movq -8(%rsp), %r9 # 8-byte Reload
leaq (%r9,%rbx,4), %rbx
movq %rsi, %r14
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB1_3: # %.preheader
# Parent Loop BB1_2 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB1_4 Depth 3
xorl %r12d, %r12d
movq %r14, %r13
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB1_4: # Parent Loop BB1_2 Depth=1
# Parent Loop BB1_3 Depth=2
# => This Inner Loop Header: Depth=3
movl (%r13), %r9d
imull (%r11,%r12,4), %r9d
addl %r9d, %ebp
incq %r12
addq %r8, %r13
cmpq %r12, %rax
jne .LBB1_4
# %bb.5: # %._crit_edge
# in Loop: Header=BB1_3 Depth=2
movl %ebp, (%rbx,%r15,4)
incq %r15
addq $4, %r14
cmpq %rax, %r15
jne .LBB1_3
# %bb.6: # %._crit_edge32
# in Loop: Header=BB1_2 Depth=1
incq %r10
addl %ecx, %edx
cmpq %rax, %r10
jne .LBB1_2
.LBB1_7: # %._crit_edge34
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z15MatrixMulOnHostPiS_S_i, .Lfunc_end1-_Z15MatrixMulOnHostPiS_S_i
.cfi_endproc
# -- End function
.globl _Z11printMatrixPii # -- Begin function _Z11printMatrixPii
.p2align 4, 0x90
.type _Z11printMatrixPii,@function
_Z11printMatrixPii: # @_Z11printMatrixPii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
pushq %rax
.cfi_def_cfa_offset 64
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rdi, (%rsp) # 8-byte Spill
testl %esi, %esi
jle .LBB2_5
# %bb.1: # %.preheader.lr.ph
movl %esi, %ebx
movl %esi, %r15d
xorl %ebp, %ebp
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB2_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB2_3 Depth 2
movl %ebp, %eax
movq (%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %r13
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB2_3: # Parent Loop BB2_2 Depth=1
# => This Inner Loop Header: Depth=2
movl (%r13,%r14,4), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
incq %r14
cmpq %r14, %r15
jne .LBB2_3
# %bb.4: # %._crit_edge
# in Loop: Header=BB2_2 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r12
addl %ebx, %ebp
cmpq %r15, %r12
jne .LBB2_2
.LBB2_5: # %._crit_edge14
movl $10, %edi
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
jmp putchar@PLT # TAILCALL
.Lfunc_end2:
.size _Z11printMatrixPii, .Lfunc_end2-_Z11printMatrixPii
.cfi_endproc
# -- End function
.globl _Z10verifyMultPiS_i # -- Begin function _Z10verifyMultPiS_i
.p2align 4, 0x90
.type _Z10verifyMultPiS_i,@function
_Z10verifyMultPiS_i: # @_Z10verifyMultPiS_i
.cfi_startproc
# %bb.0:
movq %rdi, %rax
movl $.Lstr.1, %edi
testl %edx, %edx
setg %cl
jle puts@PLT # TAILCALL
# %bb.1: # %.preheader.lr.ph
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %edx, %edx
leaq 4(%rax), %r8
leaq (,%rdx,4), %r9
leaq 4(%rsi), %r10
leaq -1(%rdx), %r11
xorl %ebx, %ebx
jmp .LBB3_2
.p2align 4, 0x90
.LBB3_7: # %.critedge
# in Loop: Header=BB3_2 Depth=1
incq %rbx
cmpq %rdx, %rbx
setb %cl
addq %r9, %r8
addq %r9, %r10
cmpq %rdx, %rbx
je .LBB3_9
.LBB3_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB3_4 Depth 2
movq %rbx, %r14
imulq %rdx, %r14
movl (%rax,%r14,4), %ebp
cmpl (%rsi,%r14,4), %ebp
jne .LBB3_8
# %bb.3: # %.lr.ph.preheader
# in Loop: Header=BB3_2 Depth=1
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB3_4: # %.lr.ph
# Parent Loop BB3_2 Depth=1
# => This Inner Loop Header: Depth=2
cmpq %r14, %r11
je .LBB3_7
# %bb.5: # in Loop: Header=BB3_4 Depth=2
movl (%r8,%r14,4), %ebp
leaq 1(%r14), %r15
cmpl (%r10,%r14,4), %ebp
movq %r15, %r14
je .LBB3_4
# %bb.6: # %._crit_edge
# in Loop: Header=BB3_2 Depth=1
cmpq %rdx, %r15
jae .LBB3_7
.LBB3_8: # %.critedge44
movl $.Lstr, %eax
movl $.Lstr.1, %edi
testb $1, %cl
cmovneq %rax, %rdi
.LBB3_9:
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.cfi_restore %r15
.cfi_restore %rbp
jmp puts@PLT # TAILCALL
.Lfunc_end3:
.size _Z10verifyMultPiS_i, .Lfunc_end3-_Z10verifyMultPiS_i
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $152, %rsp
.cfi_def_cfa_offset 208
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $256, %edi # imm = 0x100
callq malloc
movq %rax, %rbx
movl $256, %edi # imm = 0x100
callq malloc
movq %rax, %r14
movl $256, %edi # imm = 0x100
callq malloc
movq %rax, %r15
movl $256, %edi # imm = 0x100
callq malloc
movq %rax, %r12
movl $1325, %ecx # imm = 0x52D
xorl %eax, %eax
movq %rbx, %rdx
movq %r14, %rsi
.p2align 4, 0x90
.LBB4_1: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB4_2 Depth 2
xorl %edi, %edi
.p2align 4, 0x90
.LBB4_2: # Parent Loop BB4_1 Depth=1
# => This Inner Loop Header: Depth=2
imull $3125, %ecx, %ecx # imm = 0xC35
movzwl %cx, %ecx
leal -32768(%rcx), %r8d
movslq %r8d, %r8
imulq $-1610366953, %r8, %r8 # imm = 0xA003C017
shrq $32, %r8
addl %ecx, %r8d
addl $-32768, %r8d # imm = 0x8000
movl %r8d, %r9d
shrl $31, %r9d
sarl $12, %r8d
addl %r9d, %r8d
movl %r8d, (%rdx,%rdi,4)
imulq $4294968, %rcx, %r8 # imm = 0x418938
shrq $32, %r8
imull $1000, %r8d, %r8d # imm = 0x3E8
movl %ecx, %r9d
subl %r8d, %r9d
movl %r9d, (%rsi,%rdi,4)
incq %rdi
cmpq $8, %rdi
jne .LBB4_2
# %bb.3: # in Loop: Header=BB4_1 Depth=1
incq %rax
addq $32, %rsi
addq $32, %rdx
cmpq $8, %rax
jne .LBB4_1
# %bb.4: # %.preheader28.i.preheader
xorl %eax, %eax
movq %rbx, %rcx
.p2align 4, 0x90
.LBB4_5: # %.preheader28.i
# =>This Loop Header: Depth=1
# Child Loop BB4_6 Depth 2
# Child Loop BB4_7 Depth 3
movq %rax, %rdx
shlq $5, %rdx
addq %r15, %rdx
movq %r14, %rsi
xorl %edi, %edi
.p2align 4, 0x90
.LBB4_6: # %.preheader.i
# Parent Loop BB4_5 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB4_7 Depth 3
xorl %r8d, %r8d
xorl %r9d, %r9d
.p2align 4, 0x90
.LBB4_7: # Parent Loop BB4_5 Depth=1
# Parent Loop BB4_6 Depth=2
# => This Inner Loop Header: Depth=3
movl (%rsi,%r8,8), %r10d
imull (%rcx,%r8), %r10d
addl %r10d, %r9d
addq $4, %r8
cmpq $32, %r8
jne .LBB4_7
# %bb.8: # %._crit_edge.i
# in Loop: Header=BB4_6 Depth=2
movl %r9d, (%rdx,%rdi,4)
incq %rdi
addq $4, %rsi
cmpq $8, %rdi
jne .LBB4_6
# %bb.9: # %._crit_edge32.i
# in Loop: Header=BB4_5 Depth=1
incq %rax
addq $32, %rcx
cmpq $8, %rax
jne .LBB4_5
# %bb.10: # %_Z15MatrixMulOnHostPiS_S_i.exit
movl $.Lstr.2, %edi
callq puts@PLT
movq %rbx, 32(%rsp) # 8-byte Spill
movq %rbx, %r13
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB4_11: # %.preheader.i47
# =>This Loop Header: Depth=1
# Child Loop BB4_12 Depth 2
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB4_12: # Parent Loop BB4_11 Depth=1
# => This Inner Loop Header: Depth=2
movl (%r13,%rbx,4), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
incq %rbx
cmpq $8, %rbx
jne .LBB4_12
# %bb.13: # %._crit_edge.i51
# in Loop: Header=BB4_11 Depth=1
movl $10, %edi
callq putchar@PLT
incq %rbp
addq $32, %r13
cmpq $8, %rbp
jne .LBB4_11
# %bb.14: # %_Z11printMatrixPii.exit
movl $10, %edi
callq putchar@PLT
movl $.Lstr.3, %edi
callq puts@PLT
movq %r14, %r13
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB4_15: # %.preheader.i52
# =>This Loop Header: Depth=1
# Child Loop BB4_16 Depth 2
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB4_16: # Parent Loop BB4_15 Depth=1
# => This Inner Loop Header: Depth=2
movl (%r13,%rbx,4), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
incq %rbx
cmpq $8, %rbx
jne .LBB4_16
# %bb.17: # %._crit_edge.i57
# in Loop: Header=BB4_15 Depth=1
movl $10, %edi
callq putchar@PLT
incq %rbp
addq $32, %r13
cmpq $8, %rbp
jne .LBB4_15
# %bb.18: # %_Z11printMatrixPii.exit62
movl $10, %edi
callq putchar@PLT
movl $.Lstr.4, %edi
callq puts@PLT
movq %r15, %r13
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB4_19: # %.preheader.i63
# =>This Loop Header: Depth=1
# Child Loop BB4_20 Depth 2
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB4_20: # Parent Loop BB4_19 Depth=1
# => This Inner Loop Header: Depth=2
movl (%r13,%rbx,4), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
incq %rbx
cmpq $8, %rbx
jne .LBB4_20
# %bb.21: # %._crit_edge.i68
# in Loop: Header=BB4_19 Depth=1
movl $10, %edi
callq putchar@PLT
incq %rbp
addq $32, %r13
cmpq $8, %rbp
jne .LBB4_19
# %bb.22: # %_Z11printMatrixPii.exit73
movl $10, %edi
callq putchar@PLT
movl $.L.str.7, %edi
movl $2, %esi
xorl %eax, %eax
callq printf
movl $.Lstr.5, %edi
callq puts@PLT
leaq 16(%rsp), %rdi
movl $256, %esi # imm = 0x100
callq hipMalloc
leaq 8(%rsp), %rdi
movl $256, %esi # imm = 0x100
callq hipMalloc
movq %rsp, %rdi
movl $256, %esi # imm = 0x100
callq hipMalloc
movq 16(%rsp), %rdi
movl $256, %edx # imm = 0x100
movq 32(%rsp), %rsi # 8-byte Reload
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
movl $256, %edx # imm = 0x100
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movabsq $8589934594, %rdi # imm = 0x200000002
movabsq $17179869188, %rdx # imm = 0x400000004
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_24
# %bb.23:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq (%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl $8, 28(%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 28(%rsp), %rax
movq %rax, 136(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z15MatrixMulKernelPiS_S_i, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_24:
callq hipDeviceSynchronize
movq (%rsp), %rsi
movl $256, %edx # imm = 0x100
movq %r12, %rdi
movl $2, %ecx
callq hipMemcpy
callq hipDeviceSynchronize
movl $.Lstr.6, %edi
callq puts@PLT
movq %r12, %r13
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB4_25: # %.preheader.i74
# =>This Loop Header: Depth=1
# Child Loop BB4_26 Depth 2
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB4_26: # Parent Loop BB4_25 Depth=1
# => This Inner Loop Header: Depth=2
movl (%r13,%rbx,4), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
incq %rbx
cmpq $8, %rbx
jne .LBB4_26
# %bb.27: # %._crit_edge.i79
# in Loop: Header=BB4_25 Depth=1
movl $10, %edi
callq putchar@PLT
incq %rbp
addq $32, %r13
cmpq $8, %rbp
jne .LBB4_25
# %bb.28: # %_Z11printMatrixPii.exit84
movl $10, %edi
callq putchar@PLT
movq %r12, %rax
addq $4, %rax
movq %r15, %rcx
addq $4, %rcx
movb $1, %dl
xorl %esi, %esi
movq 32(%rsp), %rbx # 8-byte Reload
.LBB4_29: # %.preheader.i85
# =>This Loop Header: Depth=1
# Child Loop BB4_31 Depth 2
movq %rsi, %rdi
shlq $5, %rdi
movl (%r15,%rdi), %r8d
cmpl (%r12,%rdi), %r8d
jne .LBB4_33
# %bb.30: # %.lr.ph.preheader
# in Loop: Header=BB4_29 Depth=1
xorl %edi, %edi
.p2align 4, 0x90
.LBB4_31: # %.lr.ph
# Parent Loop BB4_29 Depth=1
# => This Inner Loop Header: Depth=2
cmpq $7, %rdi
je .LBB4_34
# %bb.32: # in Loop: Header=BB4_31 Depth=2
movl (%rcx,%rdi,4), %r8d
leaq 1(%rdi), %r9
cmpl (%rax,%rdi,4), %r8d
movq %r9, %rdi
je .LBB4_31
jmp .LBB4_33
.p2align 4, 0x90
.LBB4_34: # %.critedge.i
# in Loop: Header=BB4_29 Depth=1
cmpq $7, %rsi
leaq 1(%rsi), %rdi
setb %dl
addq $32, %rax
addq $32, %rcx
movq %rdi, %rsi
cmpq $8, %rdi
jne .LBB4_29
# %bb.35:
movl $.Lstr.1, %edi
jmp .LBB4_36
.LBB4_33: # %.critedge
movl $.Lstr, %eax
movl $.Lstr.1, %edi
testb $1, %dl
cmovneq %rax, %rdi
.LBB4_36: # %_Z10verifyMultPiS_i.exit
callq puts@PLT
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
movq %r12, %rdi
callq free
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $152, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end4:
.size main, .Lfunc_end4-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
# Module constructor (run at program start via .init_array, see below):
# registers the embedded HIP fat binary and the MatrixMulKernel symbol
# with the HIP runtime, then arranges cleanup at exit.
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
# Register the fat binary only once: skip if the cached handle is non-null.
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB5_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip) # cache handle for reuse/cleanup
.LBB5_2:
movq __hip_gpubin_handle(%rip), %rdi
# Zero the 32 bytes of stack-passed trailing arguments for
# __hipRegisterFunction (two 16-byte stores).
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z15MatrixMulKernelPiS_S_i, %esi # host-side kernel symbol
movl $.L__unnamed_1, %edx # device name string ("_Z15MatrixMulKernelPiS_S_i")
movl $.L__unnamed_1, %ecx # same string reused for both name arguments
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
# Register __hip_module_dtor to run at exit and unregister the binary.
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end5:
.size __hip_module_ctor, .Lfunc_end5-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
# Module destructor (registered with atexit by __hip_module_ctor):
# unregisters the HIP fat binary if it was ever registered and clears
# the cached handle so the teardown is idempotent.
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB6_2 # handle never set -> nothing to unregister
# %bb.1:
pushq %rax # dummy push: restores 16-byte stack alignment for the call
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip) # clear handle after unregistering
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB6_2:
retq
.Lfunc_end6:
.size __hip_module_dtor, .Lfunc_end6-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z15MatrixMulKernelPiS_S_i,@object # @_Z15MatrixMulKernelPiS_S_i
.section .rodata,"a",@progbits
.globl _Z15MatrixMulKernelPiS_S_i
.p2align 3, 0x0
_Z15MatrixMulKernelPiS_S_i:
.quad _Z30__device_stub__MatrixMulKernelPiS_S_i
.size _Z15MatrixMulKernelPiS_S_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%d "
.size .L.str, 4
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "Thread Block Count: %d\n"
.size .L.str.7, 24
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z15MatrixMulKernelPiS_S_i"
.size .L__unnamed_1, 27
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "TEST FAILED!!!"
.size .Lstr, 15
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "TEST PASSED!!!"
.size .Lstr.1, 15
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "Matrix A:"
.size .Lstr.2, 10
.type .Lstr.3,@object # @str.3
.Lstr.3:
.asciz "\nMatrix B:"
.size .Lstr.3, 11
.type .Lstr.4,@object # @str.4
.Lstr.4:
.asciz "\nCPU Multiplication of A * B:"
.size .Lstr.4, 30
.type .Lstr.5,@object # @str.5
.Lstr.5:
.asciz "Starting GPU Computations\n"
.size .Lstr.5, 27
.type .Lstr.6,@object # @str.6
.Lstr.6:
.asciz "GPU Multiplication of A * B:"
.size .Lstr.6, 29
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z30__device_stub__MatrixMulKernelPiS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z15MatrixMulKernelPiS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void calcPReLUKernel(const float *input, float *output, const float *weights, int width, int height, int channels)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= width || y >= height) {
return;
}
output[y * width + x] = input[y * width + x] > 0 ? input[y * width + x] : input[y * width + x] * weights[y % channels];
} | code for sm_80
Function : _Z15calcPReLUKernelPKfPfS0_iii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R7, SR_CTAID.Y ; /* 0x0000000000077919 */
/* 0x000e280000002600 */
/*0020*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e280000002200 */
/*0030*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e680000002500 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e620000002100 */
/*0050*/ IMAD R7, R7, c[0x0][0x4], R2 ; /* 0x0000010007077a24 */
/* 0x001fca00078e0202 */
/*0060*/ ISETP.GE.AND P0, PT, R7, c[0x0][0x17c], PT ; /* 0x00005f0007007a0c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x002fca00078e0203 */
/*0080*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x178], P0 ; /* 0x00005e0000007a0c */
/* 0x000fda0000706670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ IMAD.MOV.U32 R11, RZ, RZ, 0x4 ; /* 0x00000004ff0b7424 */
/* 0x000fe200078e00ff */
/*00b0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*00c0*/ IMAD R0, R7, c[0x0][0x178], R0 ; /* 0x00005e0007007a24 */
/* 0x000fc800078e0200 */
/*00d0*/ IMAD.WIDE R4, R0, R11, c[0x0][0x160] ; /* 0x0000580000047625 */
/* 0x000fca00078e020b */
/*00e0*/ LDG.E R9, [R4.64] ; /* 0x0000000404097981 */
/* 0x000ea2000c1e1900 */
/*00f0*/ SHF.R.S32.HI R3, RZ, 0x1f, R0 ; /* 0x0000001fff037819 */
/* 0x000fe20000011400 */
/*0100*/ BSSY B0, 0x2e0 ; /* 0x000001d000007945 */
/* 0x000fe20003800000 */
/*0110*/ LEA R2, P1, R0, c[0x0][0x168], 0x2 ; /* 0x00005a0000027a11 */
/* 0x000fc800078210ff */
/*0120*/ LEA.HI.X R3, R0, c[0x0][0x16c], R3, 0x2, P1 ; /* 0x00005b0000037a11 */
/* 0x000fe400008f1403 */
/*0130*/ FSETP.GT.AND P0, PT, R9, RZ, PT ; /* 0x000000ff0900720b */
/* 0x004fda0003f04000 */
/*0140*/ @P0 BRA 0x2d0 ; /* 0x0000018000000947 */
/* 0x000fea0003800000 */
/*0150*/ IABS R13, c[0x0][0x180] ; /* 0x00006000000d7a13 */
/* 0x000fe40000000000 */
/*0160*/ ISETP.GE.AND P2, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x000fe40003f46270 */
/*0170*/ I2F.RP R0, R13 ; /* 0x0000000d00007306 */
/* 0x000e300000209400 */
/*0180*/ MUFU.RCP R0, R0 ; /* 0x0000000000007308 */
/* 0x001e240000001000 */
/*0190*/ IADD3 R4, R0, 0xffffffe, RZ ; /* 0x0ffffffe00047810 */
/* 0x001fcc0007ffe0ff */
/*01a0*/ F2I.FTZ.U32.TRUNC.NTZ R5, R4 ; /* 0x0000000400057305 */
/* 0x000064000021f000 */
/*01b0*/ IMAD.MOV.U32 R4, RZ, RZ, RZ ; /* 0x000000ffff047224 */
/* 0x001fe200078e00ff */
/*01c0*/ IADD3 R6, RZ, -R5, RZ ; /* 0x80000005ff067210 */
/* 0x002fca0007ffe0ff */
/*01d0*/ IMAD R15, R6, R13, RZ ; /* 0x0000000d060f7224 */
/* 0x000fe200078e02ff */
/*01e0*/ IABS R6, R7 ; /* 0x0000000700067213 */
/* 0x000fc60000000000 */
/*01f0*/ IMAD.HI.U32 R5, R5, R15, R4 ; /* 0x0000000f05057227 */
/* 0x000fcc00078e0004 */
/*0200*/ IMAD.HI.U32 R5, R5, R6, RZ ; /* 0x0000000605057227 */
/* 0x000fc800078e00ff */
/*0210*/ IMAD.MOV R5, RZ, RZ, -R5 ; /* 0x000000ffff057224 */
/* 0x000fc800078e0a05 */
/*0220*/ IMAD R0, R13, R5, R6 ; /* 0x000000050d007224 */
/* 0x000fca00078e0206 */
/*0230*/ ISETP.GT.U32.AND P0, PT, R13, R0, PT ; /* 0x000000000d00720c */
/* 0x000fda0003f04070 */
/*0240*/ @!P0 IADD3 R0, R0, -R13, RZ ; /* 0x8000000d00008210 */
/* 0x000fe40007ffe0ff */
/*0250*/ ISETP.NE.AND P0, PT, RZ, c[0x0][0x180], PT ; /* 0x00006000ff007a0c */
/* 0x000fe40003f05270 */
/*0260*/ ISETP.GT.U32.AND P1, PT, R13, R0, PT ; /* 0x000000000d00720c */
/* 0x000fda0003f24070 */
/*0270*/ @!P1 IMAD.IADD R0, R0, 0x1, -R13 ; /* 0x0000000100009824 */
/* 0x000fca00078e0a0d */
/*0280*/ @!P2 IADD3 R0, -R0, RZ, RZ ; /* 0x000000ff0000a210 */
/* 0x000fe40007ffe1ff */
/*0290*/ @!P0 LOP3.LUT R0, RZ, c[0x0][0x180], RZ, 0x33, !PT ; /* 0x00006000ff008a12 */
/* 0x000fca00078e33ff */
/*02a0*/ IMAD.WIDE R4, R0, R11, c[0x0][0x170] ; /* 0x00005c0000047625 */
/* 0x000fcc00078e020b */
/*02b0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea4000c1e1900 */
/*02c0*/ FMUL R9, R9, R4 ; /* 0x0000000409097220 */
/* 0x004fe40000400000 */
/*02d0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*02e0*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */
/* 0x000fe2000c101904 */
/*02f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0300*/ BRA 0x300; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0310*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0320*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0330*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0340*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0350*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0360*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0370*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0380*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0390*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void calcPReLUKernel(const float *input, float *output, const float *weights, int width, int height, int channels)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= width || y >= height) {
return;
}
output[y * width + x] = input[y * width + x] > 0 ? input[y * width + x] : input[y * width + x] * weights[y % channels];
} | .file "tmpxft_000c0708_00000000-6_calcPReLUKernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
# Cleanup helper (registered with atexit by _ZL24__sti____cudaRegisterAllv):
# unregisters the CUDA fat binary using the handle cached in
# _ZL20__cudaFatCubinHandle.
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp # restore 16-byte stack alignment for the call
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z44__device_stub__Z15calcPReLUKernelPKfPfS0_iiiPKfPfS0_iii
.type _Z44__device_stub__Z15calcPReLUKernelPKfPfS0_iiiPKfPfS0_iii, @function
_Z44__device_stub__Z15calcPReLUKernelPKfPfS0_iiiPKfPfS0_iii:
.LFB2051:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z15calcPReLUKernelPKfPfS0_iii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z44__device_stub__Z15calcPReLUKernelPKfPfS0_iiiPKfPfS0_iii, .-_Z44__device_stub__Z15calcPReLUKernelPKfPfS0_iiiPKfPfS0_iii
.globl _Z15calcPReLUKernelPKfPfS0_iii
.type _Z15calcPReLUKernelPKfPfS0_iii, @function
# Host-side symbol for calcPReLUKernel: forwards directly to the generated
# __device_stub wrapper, which marshals the kernel arguments and invokes
# cudaLaunchKernel.
_Z15calcPReLUKernelPKfPfS0_iii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp # restore 16-byte stack alignment for the call
.cfi_def_cfa_offset 16
call _Z44__device_stub__Z15calcPReLUKernelPKfPfS0_iiiPKfPfS0_iii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z15calcPReLUKernelPKfPfS0_iii, .-_Z15calcPReLUKernelPKfPfS0_iii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z15calcPReLUKernelPKfPfS0_iii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
# Static initializer (run at program start via .init_array, see below):
# registers the embedded fat binary and the calcPReLUKernel device function
# with the CUDA runtime, then arranges unregistration at exit.
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip) # cache handle for exit cleanup
# Four trailing arguments of __cudaRegisterFunction passed as null on the
# stack. NOTE(review): presumably the optional dim/size pointer slots of the
# internal registration API -- confirm against the CUDA runtime headers.
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx # device-side name string for the kernel
movq %rdx, %rcx # same string reused for both name arguments
leaq _Z15calcPReLUKernelPKfPfS0_iii(%rip), %rsi # host-side kernel symbol
call __cudaRegisterFunction@PLT
addq $32, %rsp # pop the four pushed null arguments
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
# Register the unregister helper to run at exit.
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void calcPReLUKernel(const float *input, float *output, const float *weights, int width, int height, int channels)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= width || y >= height) {
return;
}
output[y * width + x] = input[y * width + x] > 0 ? input[y * width + x] : input[y * width + x] * weights[y % channels];
} | #include <hip/hip_runtime.h>
#include "includes.h"
// PReLU activation applied element-wise over a width x height buffer:
// one thread per (x, y) element, expecting a 2D launch configuration.
// Positive inputs pass through unchanged; non-positive inputs are scaled
// by weights[y % channels]. Threads outside the bounds exit immediately.
__global__ void calcPReLUKernel(const float *input, float *output, const float *weights, int width, int height, int channels)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < width && y < height) {
        const int idx = y * width + x;
        const float v = input[idx]; // single load instead of three identical ones
        if (v > 0) {
            output[idx] = v;
        } else {
            output[idx] = v * weights[y % channels];
        }
    }
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void calcPReLUKernel(const float *input, float *output, const float *weights, int width, int height, int channels)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= width || y >= height) {
return;
}
output[y * width + x] = input[y * width + x] > 0 ? input[y * width + x] : input[y * width + x] * weights[y % channels];
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z15calcPReLUKernelPKfPfS0_iii
.globl _Z15calcPReLUKernelPKfPfS0_iii
.p2align 8
.type _Z15calcPReLUKernelPKfPfS0_iii,@function
_Z15calcPReLUKernelPKfPfS0_iii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x34
s_load_b64 s[4:5], s[0:1], 0x18
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v4, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s2, 0xffff
s_lshr_b32 s2, s2, 16
v_mad_u64_u32 v[0:1], null, s14, s3, v[2:3]
v_mad_u64_u32 v[2:3], null, s15, s2, v[4:5]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_gt_i32_e32 vcc_lo, s4, v0
v_cmp_gt_i32_e64 s2, s5, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, vcc_lo, s2
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_4
s_load_b64 s[2:3], s[0:1], 0x0
v_mad_u64_u32 v[3:4], null, v2, s4, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v4, 31, v3
v_lshlrev_b64 v[0:1], 2, v[3:4]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v3, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v4, vcc_lo, s3, v1, vcc_lo
s_mov_b32 s2, exec_lo
global_load_b32 v3, v[3:4], off
s_waitcnt vmcnt(0)
v_cmpx_nlt_f32_e32 0, v3
s_cbranch_execz .LBB0_3
s_load_b32 s3, s[0:1], 0x20
v_ashrrev_i32_e32 v6, 31, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v2, v2, v6
v_xor_b32_e32 v2, v2, v6
s_waitcnt lgkmcnt(0)
s_ashr_i32 s4, s3, 31
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_add_i32 s3, s3, s4
s_xor_b32 s3, s3, s4
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cvt_f32_u32_e32 v4, s3
s_sub_i32 s4, 0, s3
v_rcp_iflag_f32_e32 v4, v4
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v4, 0x4f7ffffe, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_u32_f32_e32 v4, v4
v_mul_lo_u32 v5, s4, v4
s_load_b64 s[4:5], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v5, v4, v5
v_add_nc_u32_e32 v4, v4, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v4, v2, v4
v_mul_lo_u32 v4, v4, s3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v2, v2, v4
v_subrev_nc_u32_e32 v4, s3, v2
v_cmp_le_u32_e32 vcc_lo, s3, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v2, v2, v4, vcc_lo
v_subrev_nc_u32_e32 v4, s3, v2
v_cmp_le_u32_e32 vcc_lo, s3, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v2, v2, v4, vcc_lo
v_xor_b32_e32 v2, v2, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v4, v2, v6
v_ashrrev_i32_e32 v5, 31, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[4:5], 2, v[4:5]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v4, vcc_lo, s4, v4
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v5, vcc_lo
global_load_b32 v2, v[4:5], off
s_waitcnt vmcnt(0)
v_mul_f32_e32 v3, v3, v2
.LBB0_3:
s_or_b32 exec_lo, exec_lo, s2
s_load_b64 s[0:1], s[0:1], 0x8
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v3, off
.LBB0_4:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z15calcPReLUKernelPKfPfS0_iii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z15calcPReLUKernelPKfPfS0_iii, .Lfunc_end0-_Z15calcPReLUKernelPKfPfS0_iii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z15calcPReLUKernelPKfPfS0_iii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z15calcPReLUKernelPKfPfS0_iii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void calcPReLUKernel(const float *input, float *output, const float *weights, int width, int height, int channels)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= width || y >= height) {
return;
}
output[y * width + x] = input[y * width + x] > 0 ? input[y * width + x] : input[y * width + x] * weights[y % channels];
} | .text
.file "calcPReLUKernel.hip"
.globl _Z30__device_stub__calcPReLUKernelPKfPfS0_iii # -- Begin function _Z30__device_stub__calcPReLUKernelPKfPfS0_iii
.p2align 4, 0x90
.type _Z30__device_stub__calcPReLUKernelPKfPfS0_iii,@function
_Z30__device_stub__calcPReLUKernelPKfPfS0_iii: # @_Z30__device_stub__calcPReLUKernelPKfPfS0_iii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 20(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z15calcPReLUKernelPKfPfS0_iii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z30__device_stub__calcPReLUKernelPKfPfS0_iii, .Lfunc_end0-_Z30__device_stub__calcPReLUKernelPKfPfS0_iii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z15calcPReLUKernelPKfPfS0_iii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z15calcPReLUKernelPKfPfS0_iii,@object # @_Z15calcPReLUKernelPKfPfS0_iii
.section .rodata,"a",@progbits
.globl _Z15calcPReLUKernelPKfPfS0_iii
.p2align 3, 0x0
_Z15calcPReLUKernelPKfPfS0_iii:
.quad _Z30__device_stub__calcPReLUKernelPKfPfS0_iii
.size _Z15calcPReLUKernelPKfPfS0_iii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z15calcPReLUKernelPKfPfS0_iii"
.size .L__unnamed_1, 31
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z30__device_stub__calcPReLUKernelPKfPfS0_iii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z15calcPReLUKernelPKfPfS0_iii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z15calcPReLUKernelPKfPfS0_iii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R7, SR_CTAID.Y ; /* 0x0000000000077919 */
/* 0x000e280000002600 */
/*0020*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e280000002200 */
/*0030*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e680000002500 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e620000002100 */
/*0050*/ IMAD R7, R7, c[0x0][0x4], R2 ; /* 0x0000010007077a24 */
/* 0x001fca00078e0202 */
/*0060*/ ISETP.GE.AND P0, PT, R7, c[0x0][0x17c], PT ; /* 0x00005f0007007a0c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x002fca00078e0203 */
/*0080*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x178], P0 ; /* 0x00005e0000007a0c */
/* 0x000fda0000706670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ IMAD.MOV.U32 R11, RZ, RZ, 0x4 ; /* 0x00000004ff0b7424 */
/* 0x000fe200078e00ff */
/*00b0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*00c0*/ IMAD R0, R7, c[0x0][0x178], R0 ; /* 0x00005e0007007a24 */
/* 0x000fc800078e0200 */
/*00d0*/ IMAD.WIDE R4, R0, R11, c[0x0][0x160] ; /* 0x0000580000047625 */
/* 0x000fca00078e020b */
/*00e0*/ LDG.E R9, [R4.64] ; /* 0x0000000404097981 */
/* 0x000ea2000c1e1900 */
/*00f0*/ SHF.R.S32.HI R3, RZ, 0x1f, R0 ; /* 0x0000001fff037819 */
/* 0x000fe20000011400 */
/*0100*/ BSSY B0, 0x2e0 ; /* 0x000001d000007945 */
/* 0x000fe20003800000 */
/*0110*/ LEA R2, P1, R0, c[0x0][0x168], 0x2 ; /* 0x00005a0000027a11 */
/* 0x000fc800078210ff */
/*0120*/ LEA.HI.X R3, R0, c[0x0][0x16c], R3, 0x2, P1 ; /* 0x00005b0000037a11 */
/* 0x000fe400008f1403 */
/*0130*/ FSETP.GT.AND P0, PT, R9, RZ, PT ; /* 0x000000ff0900720b */
/* 0x004fda0003f04000 */
/*0140*/ @P0 BRA 0x2d0 ; /* 0x0000018000000947 */
/* 0x000fea0003800000 */
/*0150*/ IABS R13, c[0x0][0x180] ; /* 0x00006000000d7a13 */
/* 0x000fe40000000000 */
/*0160*/ ISETP.GE.AND P2, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x000fe40003f46270 */
/*0170*/ I2F.RP R0, R13 ; /* 0x0000000d00007306 */
/* 0x000e300000209400 */
/*0180*/ MUFU.RCP R0, R0 ; /* 0x0000000000007308 */
/* 0x001e240000001000 */
/*0190*/ IADD3 R4, R0, 0xffffffe, RZ ; /* 0x0ffffffe00047810 */
/* 0x001fcc0007ffe0ff */
/*01a0*/ F2I.FTZ.U32.TRUNC.NTZ R5, R4 ; /* 0x0000000400057305 */
/* 0x000064000021f000 */
/*01b0*/ IMAD.MOV.U32 R4, RZ, RZ, RZ ; /* 0x000000ffff047224 */
/* 0x001fe200078e00ff */
/*01c0*/ IADD3 R6, RZ, -R5, RZ ; /* 0x80000005ff067210 */
/* 0x002fca0007ffe0ff */
/*01d0*/ IMAD R15, R6, R13, RZ ; /* 0x0000000d060f7224 */
/* 0x000fe200078e02ff */
/*01e0*/ IABS R6, R7 ; /* 0x0000000700067213 */
/* 0x000fc60000000000 */
/*01f0*/ IMAD.HI.U32 R5, R5, R15, R4 ; /* 0x0000000f05057227 */
/* 0x000fcc00078e0004 */
/*0200*/ IMAD.HI.U32 R5, R5, R6, RZ ; /* 0x0000000605057227 */
/* 0x000fc800078e00ff */
/*0210*/ IMAD.MOV R5, RZ, RZ, -R5 ; /* 0x000000ffff057224 */
/* 0x000fc800078e0a05 */
/*0220*/ IMAD R0, R13, R5, R6 ; /* 0x000000050d007224 */
/* 0x000fca00078e0206 */
/*0230*/ ISETP.GT.U32.AND P0, PT, R13, R0, PT ; /* 0x000000000d00720c */
/* 0x000fda0003f04070 */
/*0240*/ @!P0 IADD3 R0, R0, -R13, RZ ; /* 0x8000000d00008210 */
/* 0x000fe40007ffe0ff */
/*0250*/ ISETP.NE.AND P0, PT, RZ, c[0x0][0x180], PT ; /* 0x00006000ff007a0c */
/* 0x000fe40003f05270 */
/*0260*/ ISETP.GT.U32.AND P1, PT, R13, R0, PT ; /* 0x000000000d00720c */
/* 0x000fda0003f24070 */
/*0270*/ @!P1 IMAD.IADD R0, R0, 0x1, -R13 ; /* 0x0000000100009824 */
/* 0x000fca00078e0a0d */
/*0280*/ @!P2 IADD3 R0, -R0, RZ, RZ ; /* 0x000000ff0000a210 */
/* 0x000fe40007ffe1ff */
/*0290*/ @!P0 LOP3.LUT R0, RZ, c[0x0][0x180], RZ, 0x33, !PT ; /* 0x00006000ff008a12 */
/* 0x000fca00078e33ff */
/*02a0*/ IMAD.WIDE R4, R0, R11, c[0x0][0x170] ; /* 0x00005c0000047625 */
/* 0x000fcc00078e020b */
/*02b0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea4000c1e1900 */
/*02c0*/ FMUL R9, R9, R4 ; /* 0x0000000409097220 */
/* 0x004fe40000400000 */
/*02d0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*02e0*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */
/* 0x000fe2000c101904 */
/*02f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0300*/ BRA 0x300; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0310*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0320*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0330*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0340*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0350*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0360*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0370*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0380*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0390*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z15calcPReLUKernelPKfPfS0_iii
.globl _Z15calcPReLUKernelPKfPfS0_iii
.p2align 8
.type _Z15calcPReLUKernelPKfPfS0_iii,@function
_Z15calcPReLUKernelPKfPfS0_iii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x34
s_load_b64 s[4:5], s[0:1], 0x18
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v4, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s2, 0xffff
s_lshr_b32 s2, s2, 16
v_mad_u64_u32 v[0:1], null, s14, s3, v[2:3]
v_mad_u64_u32 v[2:3], null, s15, s2, v[4:5]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_gt_i32_e32 vcc_lo, s4, v0
v_cmp_gt_i32_e64 s2, s5, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, vcc_lo, s2
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_4
s_load_b64 s[2:3], s[0:1], 0x0
v_mad_u64_u32 v[3:4], null, v2, s4, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v4, 31, v3
v_lshlrev_b64 v[0:1], 2, v[3:4]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v3, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v4, vcc_lo, s3, v1, vcc_lo
s_mov_b32 s2, exec_lo
global_load_b32 v3, v[3:4], off
s_waitcnt vmcnt(0)
v_cmpx_nlt_f32_e32 0, v3
s_cbranch_execz .LBB0_3
s_load_b32 s3, s[0:1], 0x20
v_ashrrev_i32_e32 v6, 31, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v2, v2, v6
v_xor_b32_e32 v2, v2, v6
s_waitcnt lgkmcnt(0)
s_ashr_i32 s4, s3, 31
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_add_i32 s3, s3, s4
s_xor_b32 s3, s3, s4
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cvt_f32_u32_e32 v4, s3
s_sub_i32 s4, 0, s3
v_rcp_iflag_f32_e32 v4, v4
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v4, 0x4f7ffffe, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_u32_f32_e32 v4, v4
v_mul_lo_u32 v5, s4, v4
s_load_b64 s[4:5], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v5, v4, v5
v_add_nc_u32_e32 v4, v4, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v4, v2, v4
v_mul_lo_u32 v4, v4, s3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v2, v2, v4
v_subrev_nc_u32_e32 v4, s3, v2
v_cmp_le_u32_e32 vcc_lo, s3, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v2, v2, v4, vcc_lo
v_subrev_nc_u32_e32 v4, s3, v2
v_cmp_le_u32_e32 vcc_lo, s3, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v2, v2, v4, vcc_lo
v_xor_b32_e32 v2, v2, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v4, v2, v6
v_ashrrev_i32_e32 v5, 31, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[4:5], 2, v[4:5]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v4, vcc_lo, s4, v4
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v5, vcc_lo
global_load_b32 v2, v[4:5], off
s_waitcnt vmcnt(0)
v_mul_f32_e32 v3, v3, v2
.LBB0_3:
s_or_b32 exec_lo, exec_lo, s2
s_load_b64 s[0:1], s[0:1], 0x8
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v3, off
.LBB0_4:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z15calcPReLUKernelPKfPfS0_iii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z15calcPReLUKernelPKfPfS0_iii, .Lfunc_end0-_Z15calcPReLUKernelPKfPfS0_iii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z15calcPReLUKernelPKfPfS0_iii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z15calcPReLUKernelPKfPfS0_iii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000c0708_00000000-6_calcPReLUKernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z44__device_stub__Z15calcPReLUKernelPKfPfS0_iiiPKfPfS0_iii
.type _Z44__device_stub__Z15calcPReLUKernelPKfPfS0_iiiPKfPfS0_iii, @function
_Z44__device_stub__Z15calcPReLUKernelPKfPfS0_iiiPKfPfS0_iii:
.LFB2051:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z15calcPReLUKernelPKfPfS0_iii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z44__device_stub__Z15calcPReLUKernelPKfPfS0_iiiPKfPfS0_iii, .-_Z44__device_stub__Z15calcPReLUKernelPKfPfS0_iiiPKfPfS0_iii
.globl _Z15calcPReLUKernelPKfPfS0_iii
.type _Z15calcPReLUKernelPKfPfS0_iii, @function
_Z15calcPReLUKernelPKfPfS0_iii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z44__device_stub__Z15calcPReLUKernelPKfPfS0_iiiPKfPfS0_iii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z15calcPReLUKernelPKfPfS0_iii, .-_Z15calcPReLUKernelPKfPfS0_iii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z15calcPReLUKernelPKfPfS0_iii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z15calcPReLUKernelPKfPfS0_iii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "calcPReLUKernel.hip"
.globl _Z30__device_stub__calcPReLUKernelPKfPfS0_iii # -- Begin function _Z30__device_stub__calcPReLUKernelPKfPfS0_iii
.p2align 4, 0x90
.type _Z30__device_stub__calcPReLUKernelPKfPfS0_iii,@function
_Z30__device_stub__calcPReLUKernelPKfPfS0_iii: # @_Z30__device_stub__calcPReLUKernelPKfPfS0_iii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 20(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z15calcPReLUKernelPKfPfS0_iii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z30__device_stub__calcPReLUKernelPKfPfS0_iii, .Lfunc_end0-_Z30__device_stub__calcPReLUKernelPKfPfS0_iii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z15calcPReLUKernelPKfPfS0_iii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z15calcPReLUKernelPKfPfS0_iii,@object # @_Z15calcPReLUKernelPKfPfS0_iii
.section .rodata,"a",@progbits
.globl _Z15calcPReLUKernelPKfPfS0_iii
.p2align 3, 0x0
_Z15calcPReLUKernelPKfPfS0_iii:
.quad _Z30__device_stub__calcPReLUKernelPKfPfS0_iii
.size _Z15calcPReLUKernelPKfPfS0_iii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z15calcPReLUKernelPKfPfS0_iii"
.size .L__unnamed_1, 31
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z30__device_stub__calcPReLUKernelPKfPfS0_iii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z15calcPReLUKernelPKfPfS0_iii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <cuda.h>
#include <stdio.h>
#include <time.h>
#define N 10
typedef struct {
float x[N];
float y[N];
float z[N];
} coord;
__global__
void compute_dist2(float *x, float *y, float *z, float *result) {
int index = threadIdx.x;
float deltaX = x[index+1] - x[index];
float deltaY = y[index+1] - y[index];
float deltaZ = z[index+1] - z[index];
result[index] = deltaX*deltaX + deltaY*deltaY + deltaZ*deltaZ;
}
void loadRandomCoords(coord *c) {
int i;
srand(time(NULL));
for (i = 0; i<N; i++) {
c->x[i] = rand();
c->y[i] = rand();
c->z[i] = rand();
}
}
void loadTestCoords(coord *c) {
int i;
for (i = 0; i<N; i++) {
c->x[i] = i+1;
c->y[i] = i+2;
c->z[i] = i+3;
}
}
void printArray(float *f, int len) {
int i;
for (i = 0; i<len; i++) {
printf("%f ", f[i]);
}
printf("\n");
}
int main() {
coord c;
float result[N-1];
float *dev_x, *dev_y, *dev_z, *dev_result;
dim3 grid(1,1), block(N-1,1);
//Load coordinates into host arrays.
loadRandomCoords(&c);
//Allocate memory for device pointers.
cudaMalloc(&dev_x, N*sizeof(float));
cudaMalloc(&dev_y, N*sizeof(float));
cudaMalloc(&dev_z, N*sizeof(float));
cudaMalloc(&dev_result, (N-1)*sizeof(float));
//Transfer coordinates from host to device.
cudaMemcpy(dev_x, c.x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_y, c.y, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_z, c.z, N*sizeof(float), cudaMemcpyHostToDevice);
//Call kernel to compute euclidean distance b/w adjacent points squared.
//Be sure to only use device pointers since device can't access host mem
//and vice versa.
compute_dist2 <<<grid,block>>>(dev_x, dev_y, dev_z, dev_result);
//Transfer results from device memory to host memory.
cudaMemcpy(result, dev_result, (N-1)*sizeof(float), cudaMemcpyDeviceToHost);
//Free device memory.
cudaFree(dev_x);
cudaFree(dev_y);
cudaFree(dev_z);
cudaFree(dev_result);
printArray(result, N-1);
return 0;
} | code for sm_80
Function : _Z13compute_dist2PfS_S_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R12, SR_TID.X ; /* 0x00000000000c7919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R15, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff0f7435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE R4, R12, R15, c[0x0][0x168] ; /* 0x00005a000c047625 */
/* 0x001fc800078e020f */
/*0050*/ IMAD.WIDE R2, R12.reuse, R15.reuse, c[0x0][0x160] ; /* 0x000058000c027625 */
/* 0x0c0fe200078e020f */
/*0060*/ LDG.E R8, [R4.64+0x4] ; /* 0x0000040404087981 */
/* 0x0000a8000c1e1900 */
/*0070*/ LDG.E R11, [R4.64] ; /* 0x00000004040b7981 */
/* 0x0000a2000c1e1900 */
/*0080*/ IMAD.WIDE R6, R12, R15, c[0x0][0x170] ; /* 0x00005c000c067625 */
/* 0x000fc600078e020f */
/*0090*/ LDG.E R0, [R2.64+0x4] ; /* 0x0000040402007981 */
/* 0x000ee8000c1e1900 */
/*00a0*/ LDG.E R9, [R2.64] ; /* 0x0000000402097981 */
/* 0x000ee8000c1e1900 */
/*00b0*/ LDG.E R10, [R6.64+0x4] ; /* 0x00000404060a7981 */
/* 0x000f28000c1e1900 */
/*00c0*/ LDG.E R13, [R6.64] ; /* 0x00000004060d7981 */
/* 0x000f22000c1e1900 */
/*00d0*/ IMAD.WIDE R4, R12, R15, c[0x0][0x178] ; /* 0x00005e000c047625 */
/* 0x001fc800078e020f */
/*00e0*/ FADD R8, R8, -R11 ; /* 0x8000000b08087221 */
/* 0x004fe40000000000 */
/*00f0*/ FADD R0, R0, -R9 ; /* 0x8000000900007221 */
/* 0x008fe40000000000 */
/*0100*/ FMUL R9, R8, R8 ; /* 0x0000000808097220 */
/* 0x000fc80000400000 */
/*0110*/ FFMA R9, R0, R0, R9 ; /* 0x0000000000097223 */
/* 0x000fe40000000009 */
/*0120*/ FADD R10, R10, -R13 ; /* 0x8000000d0a0a7221 */
/* 0x010fc80000000000 */
/*0130*/ FFMA R9, R10, R10, R9 ; /* 0x0000000a0a097223 */
/* 0x000fca0000000009 */
/*0140*/ STG.E [R4.64], R9 ; /* 0x0000000904007986 */
/* 0x000fe2000c101904 */
/*0150*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0160*/ BRA 0x160; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <cuda.h>
#include <stdio.h>
#include <time.h>
#define N 10
typedef struct {
float x[N];
float y[N];
float z[N];
} coord;
__global__
void compute_dist2(float *x, float *y, float *z, float *result) {
int index = threadIdx.x;
float deltaX = x[index+1] - x[index];
float deltaY = y[index+1] - y[index];
float deltaZ = z[index+1] - z[index];
result[index] = deltaX*deltaX + deltaY*deltaY + deltaZ*deltaZ;
}
void loadRandomCoords(coord *c) {
int i;
srand(time(NULL));
for (i = 0; i<N; i++) {
c->x[i] = rand();
c->y[i] = rand();
c->z[i] = rand();
}
}
void loadTestCoords(coord *c) {
int i;
for (i = 0; i<N; i++) {
c->x[i] = i+1;
c->y[i] = i+2;
c->z[i] = i+3;
}
}
void printArray(float *f, int len) {
int i;
for (i = 0; i<len; i++) {
printf("%f ", f[i]);
}
printf("\n");
}
int main() {
coord c;
float result[N-1];
float *dev_x, *dev_y, *dev_z, *dev_result;
dim3 grid(1,1), block(N-1,1);
//Load coordinates into host arrays.
loadRandomCoords(&c);
//Allocate memory for device pointers.
cudaMalloc(&dev_x, N*sizeof(float));
cudaMalloc(&dev_y, N*sizeof(float));
cudaMalloc(&dev_z, N*sizeof(float));
cudaMalloc(&dev_result, (N-1)*sizeof(float));
//Transfer coordinates from host to device.
cudaMemcpy(dev_x, c.x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_y, c.y, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_z, c.z, N*sizeof(float), cudaMemcpyHostToDevice);
//Call kernel to compute euclidean distance b/w adjacent points squared.
//Be sure to only use device pointers since device can't access host mem
//and vice versa.
compute_dist2 <<<grid,block>>>(dev_x, dev_y, dev_z, dev_result);
//Transfer results from device memory to host memory.
cudaMemcpy(result, dev_result, (N-1)*sizeof(float), cudaMemcpyDeviceToHost);
//Free device memory.
cudaFree(dev_x);
cudaFree(dev_y);
cudaFree(dev_z);
cudaFree(dev_result);
printArray(result, N-1);
return 0;
} | .file "tmpxft_00061334_00000000-6_euclidean_dist.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2063:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2063:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z16loadRandomCoordsP5coord
.type _Z16loadRandomCoordsP5coord, @function
_Z16loadRandomCoordsP5coord:
.LFB2057:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movq %rdi, %rbp
movl $0, %edi
call time@PLT
movl %eax, %edi
call srand@PLT
movq %rbp, %rbx
addq $40, %rbp
.L4:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, (%rbx)
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, 40(%rbx)
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, 80(%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L4
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size _Z16loadRandomCoordsP5coord, .-_Z16loadRandomCoordsP5coord
.globl _Z14loadTestCoordsP5coord
.type _Z14loadTestCoordsP5coord, @function
_Z14loadTestCoordsP5coord:
.LFB2058:
.cfi_startproc
endbr64
movl $1, %eax
.L8:
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, -4(%rdi,%rax,4)
leal 1(%rax), %edx
pxor %xmm0, %xmm0
cvtsi2ssl %edx, %xmm0
movss %xmm0, 36(%rdi,%rax,4)
leal 2(%rax), %edx
pxor %xmm0, %xmm0
cvtsi2ssl %edx, %xmm0
movss %xmm0, 76(%rdi,%rax,4)
addq $1, %rax
cmpq $11, %rax
jne .L8
ret
.cfi_endproc
.LFE2058:
.size _Z14loadTestCoordsP5coord, .-_Z14loadTestCoordsP5coord
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%f "
.LC1:
.string "\n"
.text
.globl _Z10printArrayPfi
.type _Z10printArrayPfi, @function
_Z10printArrayPfi:
.LFB2059:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
testl %esi, %esi
jle .L11
movq %rdi, %rbx
movslq %esi, %rsi
leaq (%rdi,%rsi,4), %r12
leaq .LC0(%rip), %rbp
.L12:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %rbp, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %r12, %rbx
jne .L12
.L11:
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _Z10printArrayPfi, .-_Z10printArrayPfi
.globl _Z39__device_stub__Z13compute_dist2PfS_S_S_PfS_S_S_
.type _Z39__device_stub__Z13compute_dist2PfS_S_S_PfS_S_S_, @function
_Z39__device_stub__Z13compute_dist2PfS_S_S_PfS_S_S_:
.LFB2085:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %rcx, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L19
.L15:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L20
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z13compute_dist2PfS_S_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L15
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z39__device_stub__Z13compute_dist2PfS_S_S_PfS_S_S_, .-_Z39__device_stub__Z13compute_dist2PfS_S_S_PfS_S_S_
.globl _Z13compute_dist2PfS_S_S_
.type _Z13compute_dist2PfS_S_S_, @function
_Z13compute_dist2PfS_S_S_:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z39__device_stub__Z13compute_dist2PfS_S_S_PfS_S_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z13compute_dist2PfS_S_S_, .-_Z13compute_dist2PfS_S_S_
.globl main
.type main, @function
main:
.LFB2060:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $240, %rsp
.cfi_def_cfa_offset 256
movq %fs:40, %rax
movq %rax, 232(%rsp)
xorl %eax, %eax
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $9, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
leaq 112(%rsp), %rbx
movq %rbx, %rdi
call _Z16loadRandomCoordsP5coord
leaq 8(%rsp), %rdi
movl $40, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $40, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $40, %esi
call cudaMalloc@PLT
leaq 32(%rsp), %rdi
movl $36, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $40, %edx
movq %rbx, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
leaq 152(%rsp), %rsi
movl $1, %ecx
movl $40, %edx
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
leaq 192(%rsp), %rsi
movl $1, %ecx
movl $40, %edx
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl 60(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 52(%rsp), %rdx
movq 40(%rsp), %rdi
movl 48(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L27
.L24:
leaq 64(%rsp), %rbx
movl $2, %ecx
movl $36, %edx
movq 32(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movl $9, %esi
movq %rbx, %rdi
call _Z10printArrayPfi
movq 232(%rsp), %rax
subq %fs:40, %rax
jne .L28
movl $0, %eax
addq $240, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L27:
.cfi_restore_state
movq 32(%rsp), %rcx
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z39__device_stub__Z13compute_dist2PfS_S_S_PfS_S_S_
jmp .L24
.L28:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2060:
.size main, .-main
.section .rodata.str1.1
.LC2:
.string "_Z13compute_dist2PfS_S_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2088:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z13compute_dist2PfS_S_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <cuda.h>
#include <stdio.h>
#include <time.h>
#define N 10
typedef struct {
float x[N];
float y[N];
float z[N];
} coord;
__global__
void compute_dist2(float *x, float *y, float *z, float *result) {
int index = threadIdx.x;
float deltaX = x[index+1] - x[index];
float deltaY = y[index+1] - y[index];
float deltaZ = z[index+1] - z[index];
result[index] = deltaX*deltaX + deltaY*deltaY + deltaZ*deltaZ;
}
void loadRandomCoords(coord *c) {
int i;
srand(time(NULL));
for (i = 0; i<N; i++) {
c->x[i] = rand();
c->y[i] = rand();
c->z[i] = rand();
}
}
void loadTestCoords(coord *c) {
int i;
for (i = 0; i<N; i++) {
c->x[i] = i+1;
c->y[i] = i+2;
c->z[i] = i+3;
}
}
void printArray(float *f, int len) {
int i;
for (i = 0; i<len; i++) {
printf("%f ", f[i]);
}
printf("\n");
}
int main() {
coord c;
float result[N-1];
float *dev_x, *dev_y, *dev_z, *dev_result;
dim3 grid(1,1), block(N-1,1);
//Load coordinates into host arrays.
loadRandomCoords(&c);
//Allocate memory for device pointers.
cudaMalloc(&dev_x, N*sizeof(float));
cudaMalloc(&dev_y, N*sizeof(float));
cudaMalloc(&dev_z, N*sizeof(float));
cudaMalloc(&dev_result, (N-1)*sizeof(float));
//Transfer coordinates from host to device.
cudaMemcpy(dev_x, c.x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_y, c.y, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_z, c.z, N*sizeof(float), cudaMemcpyHostToDevice);
//Call kernel to compute euclidean distance b/w adjacent points squared.
//Be sure to only use device pointers since device can't access host mem
//and vice versa.
compute_dist2 <<<grid,block>>>(dev_x, dev_y, dev_z, dev_result);
//Transfer results from device memory to host memory.
cudaMemcpy(result, dev_result, (N-1)*sizeof(float), cudaMemcpyDeviceToHost);
//Free device memory.
cudaFree(dev_x);
cudaFree(dev_y);
cudaFree(dev_z);
cudaFree(dev_result);
printArray(result, N-1);
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <time.h>
#define N 10
typedef struct {
float x[N];
float y[N];
float z[N];
} coord;
__global__
void compute_dist2(float *x, float *y, float *z, float *result) {
int index = threadIdx.x;
float deltaX = x[index+1] - x[index];
float deltaY = y[index+1] - y[index];
float deltaZ = z[index+1] - z[index];
result[index] = deltaX*deltaX + deltaY*deltaY + deltaZ*deltaZ;
}
void loadRandomCoords(coord *c) {
int i;
srand(time(NULL));
for (i = 0; i<N; i++) {
c->x[i] = rand();
c->y[i] = rand();
c->z[i] = rand();
}
}
void loadTestCoords(coord *c) {
int i;
for (i = 0; i<N; i++) {
c->x[i] = i+1;
c->y[i] = i+2;
c->z[i] = i+3;
}
}
void printArray(float *f, int len) {
int i;
for (i = 0; i<len; i++) {
printf("%f ", f[i]);
}
printf("\n");
}
int main() {
coord c;
float result[N-1];
float *dev_x, *dev_y, *dev_z, *dev_result;
dim3 grid(1,1), block(N-1,1);
//Load coordinates into host arrays.
loadRandomCoords(&c);
//Allocate memory for device pointers.
hipMalloc(&dev_x, N*sizeof(float));
hipMalloc(&dev_y, N*sizeof(float));
hipMalloc(&dev_z, N*sizeof(float));
hipMalloc(&dev_result, (N-1)*sizeof(float));
//Transfer coordinates from host to device.
hipMemcpy(dev_x, c.x, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_y, c.y, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_z, c.z, N*sizeof(float), hipMemcpyHostToDevice);
//Call kernel to compute euclidean distance b/w adjacent points squared.
//Be sure to only use device pointers since device can't access host mem
//and vice versa.
compute_dist2 <<<grid,block>>>(dev_x, dev_y, dev_z, dev_result);
//Transfer results from device memory to host memory.
hipMemcpy(result, dev_result, (N-1)*sizeof(float), hipMemcpyDeviceToHost);
//Free device memory.
hipFree(dev_x);
hipFree(dev_y);
hipFree(dev_z);
hipFree(dev_result);
printArray(result, N-1);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <time.h>
#define N 10
typedef struct {
float x[N];
float y[N];
float z[N];
} coord;
__global__
void compute_dist2(float *x, float *y, float *z, float *result) {
int index = threadIdx.x;
float deltaX = x[index+1] - x[index];
float deltaY = y[index+1] - y[index];
float deltaZ = z[index+1] - z[index];
result[index] = deltaX*deltaX + deltaY*deltaY + deltaZ*deltaZ;
}
void loadRandomCoords(coord *c) {
int i;
srand(time(NULL));
for (i = 0; i<N; i++) {
c->x[i] = rand();
c->y[i] = rand();
c->z[i] = rand();
}
}
void loadTestCoords(coord *c) {
int i;
for (i = 0; i<N; i++) {
c->x[i] = i+1;
c->y[i] = i+2;
c->z[i] = i+3;
}
}
void printArray(float *f, int len) {
int i;
for (i = 0; i<len; i++) {
printf("%f ", f[i]);
}
printf("\n");
}
int main() {
coord c;
float result[N-1];
float *dev_x, *dev_y, *dev_z, *dev_result;
dim3 grid(1,1), block(N-1,1);
//Load coordinates into host arrays.
loadRandomCoords(&c);
//Allocate memory for device pointers.
hipMalloc(&dev_x, N*sizeof(float));
hipMalloc(&dev_y, N*sizeof(float));
hipMalloc(&dev_z, N*sizeof(float));
hipMalloc(&dev_result, (N-1)*sizeof(float));
//Transfer coordinates from host to device.
hipMemcpy(dev_x, c.x, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_y, c.y, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_z, c.z, N*sizeof(float), hipMemcpyHostToDevice);
//Call kernel to compute euclidean distance b/w adjacent points squared.
//Be sure to only use device pointers since device can't access host mem
//and vice versa.
compute_dist2 <<<grid,block>>>(dev_x, dev_y, dev_z, dev_result);
//Transfer results from device memory to host memory.
hipMemcpy(result, dev_result, (N-1)*sizeof(float), hipMemcpyDeviceToHost);
//Free device memory.
hipFree(dev_x);
hipFree(dev_y);
hipFree(dev_z);
hipFree(dev_result);
printArray(result, N-1);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z13compute_dist2PfS_S_S_
.globl _Z13compute_dist2PfS_S_S_
.p2align 8
.type _Z13compute_dist2PfS_S_S_,@function
_Z13compute_dist2PfS_S_S_:
s_load_b256 s[0:7], s[0:1], 0x0
v_lshlrev_b32_e32 v0, 2, v0
s_delay_alu instid0(VALU_DEP_1)
v_add_nc_u32_e32 v1, 4, v0
s_waitcnt lgkmcnt(0)
s_clause 0x3
global_load_b32 v2, v0, s[2:3]
global_load_b32 v3, v1, s[2:3]
global_load_b32 v4, v1, s[0:1]
global_load_b32 v5, v0, s[0:1]
s_clause 0x1
global_load_b32 v1, v1, s[4:5]
global_load_b32 v6, v0, s[4:5]
s_waitcnt vmcnt(2)
v_dual_sub_f32 v2, v3, v2 :: v_dual_sub_f32 v3, v4, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mul_f32_e32 v2, v2, v2
s_waitcnt vmcnt(0)
v_dual_sub_f32 v1, v1, v6 :: v_dual_fmac_f32 v2, v3, v3
s_delay_alu instid0(VALU_DEP_1)
v_fmac_f32_e32 v2, v1, v1
global_store_b32 v0, v2, s[6:7]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13compute_dist2PfS_S_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 32
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 8
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z13compute_dist2PfS_S_S_, .Lfunc_end0-_Z13compute_dist2PfS_S_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 32
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13compute_dist2PfS_S_S_
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z13compute_dist2PfS_S_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <time.h>
#define N 10
typedef struct {
float x[N];
float y[N];
float z[N];
} coord;
__global__
void compute_dist2(float *x, float *y, float *z, float *result) {
int index = threadIdx.x;
float deltaX = x[index+1] - x[index];
float deltaY = y[index+1] - y[index];
float deltaZ = z[index+1] - z[index];
result[index] = deltaX*deltaX + deltaY*deltaY + deltaZ*deltaZ;
}
void loadRandomCoords(coord *c) {
int i;
srand(time(NULL));
for (i = 0; i<N; i++) {
c->x[i] = rand();
c->y[i] = rand();
c->z[i] = rand();
}
}
void loadTestCoords(coord *c) {
int i;
for (i = 0; i<N; i++) {
c->x[i] = i+1;
c->y[i] = i+2;
c->z[i] = i+3;
}
}
void printArray(float *f, int len) {
int i;
for (i = 0; i<len; i++) {
printf("%f ", f[i]);
}
printf("\n");
}
int main() {
coord c;
float result[N-1];
float *dev_x, *dev_y, *dev_z, *dev_result;
dim3 grid(1,1), block(N-1,1);
//Load coordinates into host arrays.
loadRandomCoords(&c);
//Allocate memory for device pointers.
hipMalloc(&dev_x, N*sizeof(float));
hipMalloc(&dev_y, N*sizeof(float));
hipMalloc(&dev_z, N*sizeof(float));
hipMalloc(&dev_result, (N-1)*sizeof(float));
//Transfer coordinates from host to device.
hipMemcpy(dev_x, c.x, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_y, c.y, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_z, c.z, N*sizeof(float), hipMemcpyHostToDevice);
//Call kernel to compute euclidean distance b/w adjacent points squared.
//Be sure to only use device pointers since device can't access host mem
//and vice versa.
compute_dist2 <<<grid,block>>>(dev_x, dev_y, dev_z, dev_result);
//Transfer results from device memory to host memory.
hipMemcpy(result, dev_result, (N-1)*sizeof(float), hipMemcpyDeviceToHost);
//Free device memory.
hipFree(dev_x);
hipFree(dev_y);
hipFree(dev_z);
hipFree(dev_result);
printArray(result, N-1);
return 0;
} | .text
.file "euclidean_dist.hip"
.globl _Z28__device_stub__compute_dist2PfS_S_S_ # -- Begin function _Z28__device_stub__compute_dist2PfS_S_S_
.p2align 4, 0x90
.type _Z28__device_stub__compute_dist2PfS_S_S_,@function
_Z28__device_stub__compute_dist2PfS_S_S_: # @_Z28__device_stub__compute_dist2PfS_S_S_
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movq %rcx, 48(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rax
movq %rax, 104(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z13compute_dist2PfS_S_S_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z28__device_stub__compute_dist2PfS_S_S_, .Lfunc_end0-_Z28__device_stub__compute_dist2PfS_S_S_
.cfi_endproc
# -- End function
.globl _Z16loadRandomCoordsP5coord # -- Begin function _Z16loadRandomCoordsP5coord
.p2align 4, 0x90
.type _Z16loadRandomCoordsP5coord,@function
_Z16loadRandomCoordsP5coord: # @_Z16loadRandomCoordsP5coord
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
pushq %rax
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movq %rdi, %rbx
xorl %edi, %edi
callq time
movl %eax, %edi
callq srand
movq $-10, %r14
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, 40(%rbx,%r14,4)
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, 80(%rbx,%r14,4)
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, 120(%rbx,%r14,4)
incq %r14
jne .LBB1_1
# %bb.2:
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z16loadRandomCoordsP5coord, .Lfunc_end1-_Z16loadRandomCoordsP5coord
.cfi_endproc
# -- End function
.globl _Z14loadTestCoordsP5coord # -- Begin function _Z14loadTestCoordsP5coord
.p2align 4, 0x90
.type _Z14loadTestCoordsP5coord,@function
_Z14loadTestCoordsP5coord: # @_Z14loadTestCoordsP5coord
.cfi_startproc
# %bb.0:
movq $-10, %rax
.p2align 4, 0x90
.LBB2_1: # =>This Inner Loop Header: Depth=1
leal 11(%rax), %ecx
xorps %xmm0, %xmm0
cvtsi2ss %ecx, %xmm0
leal 12(%rax), %ecx
xorps %xmm1, %xmm1
cvtsi2ss %ecx, %xmm1
leal 13(%rax), %ecx
xorps %xmm2, %xmm2
cvtsi2ss %ecx, %xmm2
movss %xmm0, 40(%rdi,%rax,4)
movss %xmm1, 80(%rdi,%rax,4)
movss %xmm2, 120(%rdi,%rax,4)
incq %rax
jne .LBB2_1
# %bb.2:
retq
.Lfunc_end2:
.size _Z14loadTestCoordsP5coord, .Lfunc_end2-_Z14loadTestCoordsP5coord
.cfi_endproc
# -- End function
.globl _Z10printArrayPfi # -- Begin function _Z10printArrayPfi
.p2align 4, 0x90
.type _Z10printArrayPfi,@function
_Z10printArrayPfi: # @_Z10printArrayPfi
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB3_4
# %bb.1: # %.lr.ph.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
movl %esi, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB3_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movss (%rbx,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %edi
movb $1, %al
callq printf
incq %r15
cmpq %r15, %r14
jne .LBB3_2
# %bb.3:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.cfi_restore %r15
.LBB3_4: # %._crit_edge
movl $10, %edi
jmp putchar@PLT # TAILCALL
.Lfunc_end3:
.size _Z10printArrayPfi, .Lfunc_end3-_Z10printArrayPfi
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $272, %rsp # imm = 0x110
.cfi_def_cfa_offset 288
.cfi_offset %rbx, -16
xorl %ebx, %ebx
xorl %edi, %edi
callq time
movl %eax, %edi
callq srand
.p2align 4, 0x90
.LBB4_1: # =>This Inner Loop Header: Depth=1
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, 152(%rsp,%rbx,4)
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, 192(%rsp,%rbx,4)
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, 232(%rsp,%rbx,4)
incq %rbx
cmpq $10, %rbx
jne .LBB4_1
# %bb.2: # %_Z16loadRandomCoordsP5coord.exit
leaq 24(%rsp), %rdi
movl $40, %esi
callq hipMalloc
leaq 16(%rsp), %rdi
movl $40, %esi
callq hipMalloc
leaq 8(%rsp), %rdi
movl $40, %esi
callq hipMalloc
movq %rsp, %rdi
movl $36, %esi
callq hipMalloc
movq 24(%rsp), %rdi
leaq 152(%rsp), %rsi
movl $40, %edx
movl $1, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
leaq 192(%rsp), %rsi
movl $40, %edx
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
leaq 232(%rsp), %rsi
movl $40, %edx
movl $1, %ecx
callq hipMemcpy
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 8(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_4
# %bb.3:
movq 24(%rsp), %rax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movq (%rsp), %rsi
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movq %rsi, 80(%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 80(%rsp), %rax
movq %rax, 136(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z13compute_dist2PfS_S_S_, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_4:
movq (%rsp), %rsi
leaq 112(%rsp), %rdi
movl $36, %edx
movl $2, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB4_5: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
movss 112(%rsp,%rbx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %edi
movb $1, %al
callq printf
incq %rbx
cmpq $9, %rbx
jne .LBB4_5
# %bb.6: # %_Z10printArrayPfi.exit
movl $10, %edi
callq putchar@PLT
xorl %eax, %eax
addq $272, %rsp # imm = 0x110
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end4:
.size main, .Lfunc_end4-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB5_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB5_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13compute_dist2PfS_S_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end5:
.size __hip_module_ctor, .Lfunc_end5-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB6_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB6_2:
retq
.Lfunc_end6:
.size __hip_module_dtor, .Lfunc_end6-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z13compute_dist2PfS_S_S_,@object # @_Z13compute_dist2PfS_S_S_
.section .rodata,"a",@progbits
.globl _Z13compute_dist2PfS_S_S_
.p2align 3, 0x0
_Z13compute_dist2PfS_S_S_:
.quad _Z28__device_stub__compute_dist2PfS_S_S_
.size _Z13compute_dist2PfS_S_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%f "
.size .L.str, 4
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z13compute_dist2PfS_S_S_"
.size .L__unnamed_1, 26
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z28__device_stub__compute_dist2PfS_S_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z13compute_dist2PfS_S_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z13compute_dist2PfS_S_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R12, SR_TID.X ; /* 0x00000000000c7919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R15, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff0f7435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE R4, R12, R15, c[0x0][0x168] ; /* 0x00005a000c047625 */
/* 0x001fc800078e020f */
/*0050*/ IMAD.WIDE R2, R12.reuse, R15.reuse, c[0x0][0x160] ; /* 0x000058000c027625 */
/* 0x0c0fe200078e020f */
/*0060*/ LDG.E R8, [R4.64+0x4] ; /* 0x0000040404087981 */
/* 0x0000a8000c1e1900 */
/*0070*/ LDG.E R11, [R4.64] ; /* 0x00000004040b7981 */
/* 0x0000a2000c1e1900 */
/*0080*/ IMAD.WIDE R6, R12, R15, c[0x0][0x170] ; /* 0x00005c000c067625 */
/* 0x000fc600078e020f */
/*0090*/ LDG.E R0, [R2.64+0x4] ; /* 0x0000040402007981 */
/* 0x000ee8000c1e1900 */
/*00a0*/ LDG.E R9, [R2.64] ; /* 0x0000000402097981 */
/* 0x000ee8000c1e1900 */
/*00b0*/ LDG.E R10, [R6.64+0x4] ; /* 0x00000404060a7981 */
/* 0x000f28000c1e1900 */
/*00c0*/ LDG.E R13, [R6.64] ; /* 0x00000004060d7981 */
/* 0x000f22000c1e1900 */
/*00d0*/ IMAD.WIDE R4, R12, R15, c[0x0][0x178] ; /* 0x00005e000c047625 */
/* 0x001fc800078e020f */
/*00e0*/ FADD R8, R8, -R11 ; /* 0x8000000b08087221 */
/* 0x004fe40000000000 */
/*00f0*/ FADD R0, R0, -R9 ; /* 0x8000000900007221 */
/* 0x008fe40000000000 */
/*0100*/ FMUL R9, R8, R8 ; /* 0x0000000808097220 */
/* 0x000fc80000400000 */
/*0110*/ FFMA R9, R0, R0, R9 ; /* 0x0000000000097223 */
/* 0x000fe40000000009 */
/*0120*/ FADD R10, R10, -R13 ; /* 0x8000000d0a0a7221 */
/* 0x010fc80000000000 */
/*0130*/ FFMA R9, R10, R10, R9 ; /* 0x0000000a0a097223 */
/* 0x000fca0000000009 */
/*0140*/ STG.E [R4.64], R9 ; /* 0x0000000904007986 */
/* 0x000fe2000c101904 */
/*0150*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0160*/ BRA 0x160; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z13compute_dist2PfS_S_S_
.globl _Z13compute_dist2PfS_S_S_
.p2align 8
.type _Z13compute_dist2PfS_S_S_,@function
_Z13compute_dist2PfS_S_S_:
s_load_b256 s[0:7], s[0:1], 0x0
v_lshlrev_b32_e32 v0, 2, v0
s_delay_alu instid0(VALU_DEP_1)
v_add_nc_u32_e32 v1, 4, v0
s_waitcnt lgkmcnt(0)
s_clause 0x3
global_load_b32 v2, v0, s[2:3]
global_load_b32 v3, v1, s[2:3]
global_load_b32 v4, v1, s[0:1]
global_load_b32 v5, v0, s[0:1]
s_clause 0x1
global_load_b32 v1, v1, s[4:5]
global_load_b32 v6, v0, s[4:5]
s_waitcnt vmcnt(2)
v_dual_sub_f32 v2, v3, v2 :: v_dual_sub_f32 v3, v4, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mul_f32_e32 v2, v2, v2
s_waitcnt vmcnt(0)
v_dual_sub_f32 v1, v1, v6 :: v_dual_fmac_f32 v2, v3, v3
s_delay_alu instid0(VALU_DEP_1)
v_fmac_f32_e32 v2, v1, v1
global_store_b32 v0, v2, s[6:7]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13compute_dist2PfS_S_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 32
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 8
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z13compute_dist2PfS_S_S_, .Lfunc_end0-_Z13compute_dist2PfS_S_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 32
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13compute_dist2PfS_S_S_
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z13compute_dist2PfS_S_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00061334_00000000-6_euclidean_dist.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2063:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2063:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z16loadRandomCoordsP5coord
.type _Z16loadRandomCoordsP5coord, @function
_Z16loadRandomCoordsP5coord:
.LFB2057:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movq %rdi, %rbp
movl $0, %edi
call time@PLT
movl %eax, %edi
call srand@PLT
movq %rbp, %rbx
addq $40, %rbp
.L4:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, (%rbx)
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, 40(%rbx)
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, 80(%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L4
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size _Z16loadRandomCoordsP5coord, .-_Z16loadRandomCoordsP5coord
.globl _Z14loadTestCoordsP5coord
.type _Z14loadTestCoordsP5coord, @function
_Z14loadTestCoordsP5coord:
.LFB2058:
.cfi_startproc
endbr64
movl $1, %eax
.L8:
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, -4(%rdi,%rax,4)
leal 1(%rax), %edx
pxor %xmm0, %xmm0
cvtsi2ssl %edx, %xmm0
movss %xmm0, 36(%rdi,%rax,4)
leal 2(%rax), %edx
pxor %xmm0, %xmm0
cvtsi2ssl %edx, %xmm0
movss %xmm0, 76(%rdi,%rax,4)
addq $1, %rax
cmpq $11, %rax
jne .L8
ret
.cfi_endproc
.LFE2058:
.size _Z14loadTestCoordsP5coord, .-_Z14loadTestCoordsP5coord
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%f "
.LC1:
.string "\n"
.text
.globl _Z10printArrayPfi
.type _Z10printArrayPfi, @function
_Z10printArrayPfi:
.LFB2059:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
testl %esi, %esi
jle .L11
movq %rdi, %rbx
movslq %esi, %rsi
leaq (%rdi,%rsi,4), %r12
leaq .LC0(%rip), %rbp
.L12:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %rbp, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %r12, %rbx
jne .L12
.L11:
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _Z10printArrayPfi, .-_Z10printArrayPfi
.globl _Z39__device_stub__Z13compute_dist2PfS_S_S_PfS_S_S_
.type _Z39__device_stub__Z13compute_dist2PfS_S_S_PfS_S_S_, @function
_Z39__device_stub__Z13compute_dist2PfS_S_S_PfS_S_S_:
.LFB2085:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %rcx, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L19
.L15:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L20
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z13compute_dist2PfS_S_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L15
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z39__device_stub__Z13compute_dist2PfS_S_S_PfS_S_S_, .-_Z39__device_stub__Z13compute_dist2PfS_S_S_PfS_S_S_
.globl _Z13compute_dist2PfS_S_S_
.type _Z13compute_dist2PfS_S_S_, @function
_Z13compute_dist2PfS_S_S_:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z39__device_stub__Z13compute_dist2PfS_S_S_PfS_S_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z13compute_dist2PfS_S_S_, .-_Z13compute_dist2PfS_S_S_
.globl main
.type main, @function
main:
.LFB2060:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $240, %rsp
.cfi_def_cfa_offset 256
movq %fs:40, %rax
movq %rax, 232(%rsp)
xorl %eax, %eax
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $9, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
leaq 112(%rsp), %rbx
movq %rbx, %rdi
call _Z16loadRandomCoordsP5coord
leaq 8(%rsp), %rdi
movl $40, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $40, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $40, %esi
call cudaMalloc@PLT
leaq 32(%rsp), %rdi
movl $36, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $40, %edx
movq %rbx, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
leaq 152(%rsp), %rsi
movl $1, %ecx
movl $40, %edx
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
leaq 192(%rsp), %rsi
movl $1, %ecx
movl $40, %edx
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl 60(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 52(%rsp), %rdx
movq 40(%rsp), %rdi
movl 48(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L27
.L24:
leaq 64(%rsp), %rbx
movl $2, %ecx
movl $36, %edx
movq 32(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movl $9, %esi
movq %rbx, %rdi
call _Z10printArrayPfi
movq 232(%rsp), %rax
subq %fs:40, %rax
jne .L28
movl $0, %eax
addq $240, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L27:
.cfi_restore_state
movq 32(%rsp), %rcx
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z39__device_stub__Z13compute_dist2PfS_S_S_PfS_S_S_
jmp .L24
.L28:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2060:
.size main, .-main
.section .rodata.str1.1
.LC2:
.string "_Z13compute_dist2PfS_S_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2088:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z13compute_dist2PfS_S_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "euclidean_dist.hip"
.globl _Z28__device_stub__compute_dist2PfS_S_S_ # -- Begin function _Z28__device_stub__compute_dist2PfS_S_S_
.p2align 4, 0x90
.type _Z28__device_stub__compute_dist2PfS_S_S_,@function
_Z28__device_stub__compute_dist2PfS_S_S_: # @_Z28__device_stub__compute_dist2PfS_S_S_
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movq %rcx, 48(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rax
movq %rax, 104(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z13compute_dist2PfS_S_S_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z28__device_stub__compute_dist2PfS_S_S_, .Lfunc_end0-_Z28__device_stub__compute_dist2PfS_S_S_
.cfi_endproc
# -- End function
.globl _Z16loadRandomCoordsP5coord # -- Begin function _Z16loadRandomCoordsP5coord
.p2align 4, 0x90
.type _Z16loadRandomCoordsP5coord,@function
_Z16loadRandomCoordsP5coord: # @_Z16loadRandomCoordsP5coord
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
pushq %rax
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movq %rdi, %rbx
xorl %edi, %edi
callq time
movl %eax, %edi
callq srand
movq $-10, %r14
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, 40(%rbx,%r14,4)
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, 80(%rbx,%r14,4)
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, 120(%rbx,%r14,4)
incq %r14
jne .LBB1_1
# %bb.2:
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z16loadRandomCoordsP5coord, .Lfunc_end1-_Z16loadRandomCoordsP5coord
.cfi_endproc
# -- End function
.globl _Z14loadTestCoordsP5coord # -- Begin function _Z14loadTestCoordsP5coord
.p2align 4, 0x90
.type _Z14loadTestCoordsP5coord,@function
_Z14loadTestCoordsP5coord: # @_Z14loadTestCoordsP5coord
.cfi_startproc
# %bb.0:
movq $-10, %rax
.p2align 4, 0x90
.LBB2_1: # =>This Inner Loop Header: Depth=1
leal 11(%rax), %ecx
xorps %xmm0, %xmm0
cvtsi2ss %ecx, %xmm0
leal 12(%rax), %ecx
xorps %xmm1, %xmm1
cvtsi2ss %ecx, %xmm1
leal 13(%rax), %ecx
xorps %xmm2, %xmm2
cvtsi2ss %ecx, %xmm2
movss %xmm0, 40(%rdi,%rax,4)
movss %xmm1, 80(%rdi,%rax,4)
movss %xmm2, 120(%rdi,%rax,4)
incq %rax
jne .LBB2_1
# %bb.2:
retq
.Lfunc_end2:
.size _Z14loadTestCoordsP5coord, .Lfunc_end2-_Z14loadTestCoordsP5coord
.cfi_endproc
# -- End function
.globl _Z10printArrayPfi # -- Begin function _Z10printArrayPfi
.p2align 4, 0x90
.type _Z10printArrayPfi,@function
_Z10printArrayPfi: # @_Z10printArrayPfi
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB3_4
# %bb.1: # %.lr.ph.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
movl %esi, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB3_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movss (%rbx,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %edi
movb $1, %al
callq printf
incq %r15
cmpq %r15, %r14
jne .LBB3_2
# %bb.3:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.cfi_restore %r15
.LBB3_4: # %._crit_edge
movl $10, %edi
jmp putchar@PLT # TAILCALL
.Lfunc_end3:
.size _Z10printArrayPfi, .Lfunc_end3-_Z10printArrayPfi
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $272, %rsp # imm = 0x110
.cfi_def_cfa_offset 288
.cfi_offset %rbx, -16
xorl %ebx, %ebx
xorl %edi, %edi
callq time
movl %eax, %edi
callq srand
.p2align 4, 0x90
.LBB4_1: # =>This Inner Loop Header: Depth=1
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, 152(%rsp,%rbx,4)
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, 192(%rsp,%rbx,4)
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, 232(%rsp,%rbx,4)
incq %rbx
cmpq $10, %rbx
jne .LBB4_1
# %bb.2: # %_Z16loadRandomCoordsP5coord.exit
leaq 24(%rsp), %rdi
movl $40, %esi
callq hipMalloc
leaq 16(%rsp), %rdi
movl $40, %esi
callq hipMalloc
leaq 8(%rsp), %rdi
movl $40, %esi
callq hipMalloc
movq %rsp, %rdi
movl $36, %esi
callq hipMalloc
movq 24(%rsp), %rdi
leaq 152(%rsp), %rsi
movl $40, %edx
movl $1, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
leaq 192(%rsp), %rsi
movl $40, %edx
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
leaq 232(%rsp), %rsi
movl $40, %edx
movl $1, %ecx
callq hipMemcpy
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 8(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_4
# %bb.3:
movq 24(%rsp), %rax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movq (%rsp), %rsi
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movq %rsi, 80(%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 80(%rsp), %rax
movq %rax, 136(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z13compute_dist2PfS_S_S_, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_4:
movq (%rsp), %rsi
leaq 112(%rsp), %rdi
movl $36, %edx
movl $2, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB4_5: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
movss 112(%rsp,%rbx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %edi
movb $1, %al
callq printf
incq %rbx
cmpq $9, %rbx
jne .LBB4_5
# %bb.6: # %_Z10printArrayPfi.exit
movl $10, %edi
callq putchar@PLT
xorl %eax, %eax
addq $272, %rsp # imm = 0x110
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end4:
.size main, .Lfunc_end4-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB5_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB5_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13compute_dist2PfS_S_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end5:
.size __hip_module_ctor, .Lfunc_end5-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB6_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB6_2:
retq
.Lfunc_end6:
.size __hip_module_dtor, .Lfunc_end6-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z13compute_dist2PfS_S_S_,@object # @_Z13compute_dist2PfS_S_S_
.section .rodata,"a",@progbits
.globl _Z13compute_dist2PfS_S_S_
.p2align 3, 0x0
_Z13compute_dist2PfS_S_S_:
.quad _Z28__device_stub__compute_dist2PfS_S_S_
.size _Z13compute_dist2PfS_S_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%f "
.size .L.str, 4
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z13compute_dist2PfS_S_S_"
.size .L__unnamed_1, 26
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z28__device_stub__compute_dist2PfS_S_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z13compute_dist2PfS_S_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | //Author: Manjari Pokala
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <math.h>
#include <cuda_runtime.h>
//Reading array A from input file inp.txt
typedef struct {
int *array;
size_t used;
size_t size;
} Array;
// Initialize a dynamic int array: allocate the backing store and reset the
// element counters. The malloc result is not checked; callers assume the
// (tiny) initial allocation succeeds.
void initArray(Array *a, size_t initialSize) {
    a->used = 0;
    a->size = initialSize;
    a->array = (int*) malloc(sizeof(int) * initialSize);
}
// Append one element, growing the backing store on demand.
// The array grows by exactly ONE slot per overflow, so after any sequence of
// inserts `size == used` always holds. main() depends on that invariant: it
// reads A.size (not A.used) as the element count. Do not switch to the usual
// capacity-doubling scheme without also fixing that caller.
// NOTE(review): the realloc result is not checked; on failure the old
// pointer is leaked and the next line dereferences NULL — confirm acceptable.
void insertArray(Array *a, int element) {
if (a->used == a->size) {
a->size += 1;
a->array =(int*) realloc(a->array, a->size * sizeof(int));
}
a->array[a->used++] = element;
}
// Read integers from "inp.txt" into a freshly-initialized Array.
// Each line is split on spaces/commas (strtok_r treats " , " as a SET of
// delimiter characters, not a literal separator) and every token is parsed
// with atoi. On fopen failure an "error" message is printed and the (empty,
// but allocated) Array is returned.
// NOTE(review): strtok_r is declared in <string.h>; confirm that header is
// included, otherwise this relies on an implicit declaration.
Array initArrayA(){
FILE *fp;
// fixed line buffer; lines longer than 49999 chars are split across reads
char str[50000];
Array a;
initArray(&a, 1);
/* opening file for reading */
fp = fopen("inp.txt" , "r");
if(fp == NULL) {
printf("%s","error");
return a;
}
while( fgets (str, 50000, fp)!=NULL ) {
/* writing content to stdout */
// printf("%s\n", str);
char* token;
// Passing `rest` as both the string and the save pointer is a valid
// strtok_r idiom: after each call `rest` points at the unparsed tail,
// which is then handed back in as the "new" string on the next call.
char* rest = str;
while ((token = strtok_r(rest, " , ", &rest)))
insertArray(&a, atoi(token));
}
fclose(fp);
return a;
}
//Asserts for GPU errors
#define gpuErrorCheck(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Diagnose a CUDA status code: on anything other than cudaSuccess, print a
// readable message with the call site (file/line) and, unless the caller
// passed abort=false, terminate the process using the code as exit status.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
   if (code == cudaSuccess)
      return;
   printf("GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
   if (abort)
      exit(code);
}
// Per-block tree reduction performed IN PLACE in global memory: each block
// sums its slice of d_in and thread 0 writes the block's partial sum to
// d_out[blockIdx.x]. CAUTION: d_in is destroyed by the reduction.
// The halving stride below only visits every element when blockDim.x is a
// power of two; with an odd block size the top element of each stage is
// silently dropped (relevant for the second-stage launch in reduce(), where
// blockDim.x == number of first-stage blocks).
__global__ void global_reduce_kernel(int * d_out, int * d_in, int size)
{
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
// do reduction in global mem
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
// bounds guards keep the tail block from reading/adding past `size`
if (tid < s && myId < size && myId+s < size)
{
d_in[myId] += d_in[myId + s];
}
__syncthreads(); // make sure all adds at one stage are done!
}
// only thread 0 writes result for this block back to global mem
// (for tid == 0, myId == blockIdx.x * blockDim.x, which is < size for
// every block of the ceil(size/threads) grid computed by reduce())
if (tid == 0)
{
d_out[blockIdx.x] = d_in[myId];
}
}
// Per-block tree reduction in shared memory: each block sums its slice of
// d_in and thread 0 writes the block's partial sum to d_out[blockIdx.x].
// Unlike the global-memory variant, d_in is left intact.
// Launch contract: dynamic shared memory (3rd launch arg) must be
// blockDim.x * sizeof(int). The halving stride only visits every element
// when blockDim.x is a power of two (see note in reduce()).
__global__ void shmem_reduce_kernel(int * d_out, const int * d_in, int size)
{
// sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>>
extern __shared__ int sdata[];
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
// load shared mem from global mem.
// FIX: the tail block of a ceil(size/blockDim.x) grid has threads with
// myId >= size; the original read d_in[myId] unguarded, an out-of-bounds
// access. Zero is the identity for the sum, so padding with 0 keeps the
// result unchanged while staying in bounds.
sdata[tid] = (myId < size) ? d_in[myId] : 0;
__syncthreads(); // make sure entire block is loaded!
// do reduction in shared mem
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s && myId < size && myId+s < size)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads(); // make sure all adds at one stage are done!
}
// only thread 0 writes result for this block back to global mem
if (tid == 0)
{
d_out[blockIdx.x] = sdata[0];
}
}
//kernel to create an intermediate array corresponding to given bins of array B
// Flag kernel for one histogram bin: d_bins[i] = 1 if d_binsin[i] is inside
// the inclusive range [x, y], else 0. A grid-stride loop lets any launch
// configuration cover any `size` and reuses threads for scalability.
__global__ void global_count_range_bins_kernel(int * d_bins, int * d_binsin, int size, int x, int y)
{
int myId = threadIdx.x + blockDim.x * blockIdx.x;
// stride is the total number of threads in the grid
int stride = blockDim.x * gridDim.x;
// assign flags in global memory
for (; myId < size; myId += stride)
{
int v = d_binsin[myId];
d_bins[myId] = (v >= x && v <= y) ? 1 : 0;
// FIX: the original called __syncthreads() here. That barrier was both
// unnecessary (each thread touches only its own global element — no
// shared data, no cross-thread dependency) and unsafe: the loop trip
// count differs per thread near the tail, so the barrier sat in
// divergent control flow, which is undefined behavior.
}
}
//kernel to perform parallel prefix sum
//assumes only 1 block (1 block can be utilized since we have only 10 elements)
// Inclusive (Hillis-Steele) prefix sum within a single block.
// Launch contract: one block whose blockDim.x covers all `size` elements,
// with dynamic shared memory = blockDim.x * sizeof(int) (the caller uses
// 1 block of 10 threads for the 10 bin counts).
// NOTE(review): the initial load is unguarded, so the launch must not have
// more threads than d_in has elements — confirm against the call site.
// Two fixes versus the original:
//  1) The scan loop (and its barriers) sat inside `if (myId < size)`;
//     __syncthreads() in divergent control flow is undefined behavior.
//     The loop now runs uniformly for all threads.
//  2) `sh_mem[tid] += sh_mem[tid - d]` read a slot that its neighbor could
//     be writing in the same step (data race). Each step now reads into a
//     register, barriers, then writes, then barriers again.
__global__ void prefixsum(int *d_out, int * d_in, int size)
{
extern __shared__ int sh_mem[];
int tid = threadIdx.x;
int myId = blockIdx.x * blockDim.x + threadIdx.x;
sh_mem[tid] = d_in[myId];
__syncthreads();
for (int d = 1; d < blockDim.x; d *= 2)
{
int addend = 0;
if (tid >= d) {
addend = sh_mem[tid - d];
}
__syncthreads();
if (tid >= d) {
sh_mem[tid] += addend;
}
__syncthreads();
}
d_out[myId] = sh_mem[tid];
}
//Function to call corresponding kernel based on memory usage
// Two-stage sum reduction of d_in[0..size-1] into d_out[0].
// Stage 1: ceil(size/512) blocks each produce a partial sum in
// d_intermediate. Stage 2: a single block of `blocks` threads reduces the
// partials into d_out. d_intermediate is clobbered; the global-memory path
// also clobbers d_in (global_reduce_kernel reduces in place).
// NOTE(review): stage 2 launches `blocks` threads in one block, so this only
// works while blocks <= the device's max threads per block (size <= 512 *
// 1024 on most GPUs) — confirm input sizes. Also, `blocks` is generally not
// a power of two, and both kernels' halving stride drops elements for
// non-power-of-two thread counts, so stage 2 can undercount.
// NOTE(review): error checking (gpuErrorCheck) is done only on the
// global-memory path; the shared-memory launches are unchecked. Adding
// checks there would insert a device sync inside the interval main() times
// with CUDA events, so it is left asymmetric here — flagging for review.
void reduce(int * d_out, int * d_intermediate, int * d_in,
int size, bool usesSharedMemory)
{
const int maxThreadsPerBlock = 512;
int threads = maxThreadsPerBlock;
// handles non power of 2 arrays
int blocks = ceil(float(size) / float(maxThreadsPerBlock));
if (usesSharedMemory)
{
// dynamic shared memory: one int per thread
shmem_reduce_kernel<<<blocks, threads, threads * sizeof(int)>>>
(d_intermediate, d_in, size);
}
else
{
global_reduce_kernel<<<blocks, threads>>>
(d_intermediate, d_in, size);
gpuErrorCheck( cudaPeekAtLastError() );
gpuErrorCheck( cudaDeviceSynchronize() );
}
// now we're down to one block left, so reduce it
threads = blocks; // launch one thread for each block in prev step
blocks = 1;
if (usesSharedMemory)
{
shmem_reduce_kernel<<<blocks, threads, threads * sizeof(int)>>>
(d_out, d_intermediate, size);
}
else
{
global_reduce_kernel<<<blocks, threads>>>
(d_out, d_intermediate, size);
}
}
int main(int argc, char **argv)
{
FILE *q2a;
FILE *q2b;
FILE *q2c;
q2a = fopen("q2a.txt", "w");
q2b = fopen("q2b.txt", "w");
q2c = fopen("q2c.txt", "w");
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(q2a, "error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
int dev = 0;
cudaSetDevice(dev);
cudaDeviceProp devProps;
if (cudaGetDeviceProperties(&devProps, dev) == 0)
{
fprintf(q2a, "Using device %d:\n", dev);
fprintf(q2a, "%s; global mem: %dB; compute v%d.%d; clock: %d kHz\n",
devProps.name, (int)devProps.totalGlobalMem,
(int)devProps.major, (int)devProps.minor,
(int)devProps.clockRate);
}
// generate the input array on the host
Array A = initArrayA();
int * h_in = A.array;
const int ARRAY_SIZE = A.size;
const int ARRAY_BYTES = A.size * sizeof(int);
fprintf(q2a, "array size is %d\n", ARRAY_SIZE);
// declare GPU memory pointers
int * d_in, * d_intermediate, * d_out, * d_bins, *d_binsin, *prefix_out, *prefix_in;
// allocate GPU memory
cudaMalloc((void **) &d_in, ARRAY_BYTES);
cudaMalloc((void **) &d_binsin, ARRAY_BYTES);
cudaMalloc((void **) &d_intermediate, ARRAY_BYTES); // overallocated
cudaMalloc((void **) &d_bins, ARRAY_BYTES);
cudaMalloc((void **) &d_out, sizeof(int));
// allocate memory for prefix sum, it has only 10 buckets
cudaMalloc((void **) &prefix_out, 10*sizeof(int));
cudaMalloc((void **) &prefix_in, 10*sizeof(int));
const int maxThreadsPerBlock = 512;
int threads = maxThreadsPerBlock;
// handles non power of 2 arrays
int blocks = ceil(float(ARRAY_SIZE) / float(maxThreadsPerBlock));
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float elapsedTime;
//Problem 2a - Using Global Memory to get counts(from Reduction)
fprintf(q2a,"Using Global Memory to get counts(from Reduction\n");
// copy back the bin counts from GPU
int b[10];
// transfer the input array to the GPU
gpuErrorCheck( cudaMemcpy(d_binsin, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice));
//fprintf(q2a, "Running global count\n");
//Bin 1
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 0, 99);
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
gpuErrorCheck( cudaPeekAtLastError() );
gpuErrorCheck( cudaDeviceSynchronize() );
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 1 - average time elapsed using global memory: %f\n", elapsedTime);
cudaMemcpy(&b[0], d_out, sizeof(int), cudaMemcpyDeviceToHost);
//Bin 2
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 100, 199);
gpuErrorCheck( cudaPeekAtLastError() );
gpuErrorCheck( cudaDeviceSynchronize() );
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 2 - average time elapsed using global memory: %f\n", elapsedTime);
cudaMemcpy(&b[1], d_out, sizeof(int), cudaMemcpyDeviceToHost);
//Bin 3
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 200, 299);
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 3 - average time elapsed using global memory: %f\n", elapsedTime);
cudaMemcpy(&b[2], d_out, sizeof(int), cudaMemcpyDeviceToHost);
//Bin 4
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 300, 399);
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 4 - average time elapsed using global memory: %f\n", elapsedTime);
cudaMemcpy(&b[3], d_out, sizeof(int), cudaMemcpyDeviceToHost);
//Bin 5
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 400, 499);
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 5 - average time elapsed using global memory: %f\n", elapsedTime);
cudaMemcpy(&b[4], d_out, sizeof(int), cudaMemcpyDeviceToHost);
//Bin 6
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 500, 599);
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 6 - average time elapsed using global memory: %f\n", elapsedTime);
cudaMemcpy(&b[5], d_out, sizeof(int), cudaMemcpyDeviceToHost);
//Bin 7
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 600, 699);
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 7 - average time elapsed using global memory: %f\n", elapsedTime);
cudaMemcpy(&b[6], d_out, sizeof(int), cudaMemcpyDeviceToHost);
//Bin 8
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 700, 799);
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 8 - average time elapsed using global memory: %f\n", elapsedTime);
cudaMemcpy(&b[7], d_out, sizeof(int), cudaMemcpyDeviceToHost);
//Bin 9
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 800, 899);
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 9 - average time elapsed using global memory: %f\n", elapsedTime);
cudaMemcpy(&b[8], d_out, sizeof(int), cudaMemcpyDeviceToHost);
//Bin 10
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 900, 999);
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 10 - average time elapsed using global memory: %f\n", elapsedTime);
cudaMemcpy(&b[9], d_out, sizeof(int), cudaMemcpyDeviceToHost);
for(int i = 0; i < 10; i++) {
fprintf(q2a, "Global Memory - count returned by device: %d\n", b[i]);
}
//Problem 2b - Using Shared Memory to get counts(from Reduction)
fprintf(q2b,"Using Shared Memory to get counts(from Reduction\n");
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(q2b, "error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
dev = 0;
cudaSetDevice(dev);
if (cudaGetDeviceProperties(&devProps, dev) == 0)
{
fprintf(q2b, "Using device %d:\n", dev);
fprintf(q2b, "%s; global mem: %dB; compute v%d.%d; clock: %d kHz\n",
devProps.name, (int)devProps.totalGlobalMem,
(int)devProps.major, (int)devProps.minor,
(int)devProps.clockRate);
}
fprintf(q2b,"array size is %d\n", ARRAY_SIZE);
// copy back the bin counts from GPU
int s[10];
// transfer the input array to the GPU
cudaMemcpy(d_binsin, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
//fprintf(q2b, "Running shared count\n");
//Bin 1
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 0, 99);
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 1 - average time elapsed using shared memory: %f\n", elapsedTime);
cudaMemcpy(&s[0], d_out, sizeof(int), cudaMemcpyDeviceToHost);
//Bin 2
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 100, 199);
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 2 - average time elapsed using shared memory: %f\n", elapsedTime);
cudaMemcpy(&s[1], d_out, sizeof(int), cudaMemcpyDeviceToHost);
//Bin 3
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 200, 299);
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 3 - average time elapsed using shared memory: %f\n", elapsedTime);
cudaMemcpy(&s[2], d_out, sizeof(int), cudaMemcpyDeviceToHost);
//Bin 4
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 300, 399);
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 4 - average time elapsed using shared memory: %f\n", elapsedTime);
cudaMemcpy(&s[3], d_out, sizeof(int), cudaMemcpyDeviceToHost);
//Bin 5
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 400, 499);
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 5 - average time elapsed using shared memory: %f\n", elapsedTime);
cudaMemcpy(&s[4], d_out, sizeof(int), cudaMemcpyDeviceToHost);
//Bin 6
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 500, 599);
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 6 - average time elapsed using shared memory: %f\n", elapsedTime);
cudaMemcpy(&s[5], d_out, sizeof(int), cudaMemcpyDeviceToHost);
//Bin 7
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 600, 699);
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 7 - average time elapsed using shared memory: %f\n", elapsedTime);
cudaMemcpy(&s[6], d_out, sizeof(int), cudaMemcpyDeviceToHost);
//Bin 8
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 700, 799);
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 8 - average time elapsed using shared memory: %f\n", elapsedTime);
cudaMemcpy(&s[7], d_out, sizeof(int), cudaMemcpyDeviceToHost);
//Bin 9
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 800, 899);
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 9 - average time elapsed using shared memory: %f\n", elapsedTime);
cudaMemcpy(&s[8], d_out, sizeof(int), cudaMemcpyDeviceToHost);
//Bin 10
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 900, 999);
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 10 - average time elapsed using shared memory: %f\n", elapsedTime);
cudaMemcpy(&s[9], d_out, sizeof(int), cudaMemcpyDeviceToHost);
for(int i = 0; i < 10; i++) {
fprintf(q2b, "Shared Memory - count returned by device: %d\n", s[i]);
}
//Problem 2c - Using Parallel Prefix Scan
fprintf(q2c,"Using Parallel Prefix Scan to generate C\n");
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(q2c, "error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
dev = 0;
cudaSetDevice(dev);
if (cudaGetDeviceProperties(&devProps, dev) == 0)
{
fprintf(q2c, "Using device %d:\n", dev);
fprintf(q2c, "%s; global mem: %dB; compute v%d.%d; clock: %d kHz\n",
devProps.name, (int)devProps.totalGlobalMem,
(int)devProps.major, (int)devProps.minor,
(int)devProps.clockRate);
}
// transfer the input scan array to the GPU
cudaMemcpy(prefix_in, b, 10 * sizeof(int), cudaMemcpyHostToDevice);
cudaEventRecord(start, 0);
prefixsum<<<1, 10, 10 * sizeof(int)>>>(prefix_out, prefix_in, 10);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
gpuErrorCheck( cudaPeekAtLastError() );
gpuErrorCheck( cudaDeviceSynchronize() );
// copy back the prefix sum from GPU
int c[10];
cudaMemcpy(&c, prefix_out, 10*sizeof(int), cudaMemcpyDeviceToHost);
fprintf(q2c, "Prefix Sum - average time elapsed: %f\n", elapsedTime);
for(int i = 0; i < 10; i++) {
fprintf(q2c, "Prefix Sum returned by device: %d\n", c[i]);
}
// free GPU memory allocation
cudaFree(d_in);
cudaFree(d_intermediate);
cudaFree(d_out);
cudaFree(d_bins);
cudaFree(d_binsin);
cudaFree(prefix_out);
cudaFree(prefix_in);
return 0;
}
// Reference: https://github.com/manjaripokala/sum20-Parallel-algs/blob/master/cuda-examples/reduce.cu | code for sm_80
Function : _Z9prefixsumPiS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x001fc800078e0205 */
/*0060*/ IMAD.WIDE R2, R0, R3, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fcc00078e0203 */
/*0070*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff047624 */
/* 0x000fca00078e00ff */
/*0090*/ ISETP.GE.U32.AND P0, PT, R4, 0x2, PT ; /* 0x000000020400780c */
/* 0x000fc80003f06070 */
/*00a0*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x170], !P0 ; /* 0x00005c0000007a0c */
/* 0x000fe20004706670 */
/*00b0*/ STS [R5.X4], R2 ; /* 0x0000000205007388 */
/* 0x0041e80000004800 */
/*00c0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000ff00000010000 */
/*00d0*/ @P0 BRA 0x1a0 ; /* 0x000000c000000947 */
/* 0x000fea0003800000 */
/*00e0*/ HFMA2.MMA R2, -RZ, RZ, 0, 5.9604644775390625e-08 ; /* 0x00000001ff027435 */
/* 0x001fd400000001ff */
/*00f0*/ ISETP.GE.AND P0, PT, R5, R2, PT ; /* 0x000000020500720c */
/* 0x000fe20003f06270 */
/*0100*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */
/* 0x000fd80003800000 */
/*0110*/ @P0 IMAD.IADD R3, R5, 0x1, -R2 ; /* 0x0000000105030824 */
/* 0x000fe200078e0a02 */
/*0120*/ @P0 LDS R4, [R5.X4] ; /* 0x0000000005040984 */
/* 0x000fe20000004800 */
/*0130*/ SHF.L.U32 R2, R2, 0x1, RZ ; /* 0x0000000102027819 */
/* 0x000fc800000006ff */
/*0140*/ @P0 LDS R3, [R3.X4] ; /* 0x0000000003030984 */
/* 0x000e240000004800 */
/*0150*/ @P0 IMAD.IADD R4, R4, 0x1, R3 ; /* 0x0000000104040824 */
/* 0x001fca00078e0203 */
/*0160*/ @P0 STS [R5.X4], R4 ; /* 0x0000000405000388 */
/* 0x0001e80000004800 */
/*0170*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0180*/ ISETP.GE.U32.AND P0, PT, R2, c[0x0][0x0], PT ; /* 0x0000000002007a0c */
/* 0x000fda0003f06070 */
/*0190*/ @!P0 BRA 0xf0 ; /* 0xffffff5000008947 */
/* 0x001fea000383ffff */
/*01a0*/ LDS R5, [R5.X4] ; /* 0x0000000005057984 */
/* 0x001e220000004800 */
/*01b0*/ SHF.R.S32.HI R3, RZ, 0x1f, R0 ; /* 0x0000001fff037819 */
/* 0x000fe40000011400 */
/*01c0*/ LEA R2, P0, R0, c[0x0][0x160], 0x2 ; /* 0x0000580000027a11 */
/* 0x000fc800078010ff */
/*01d0*/ LEA.HI.X R3, R0, c[0x0][0x164], R3, 0x2, P0 ; /* 0x0000590000037a11 */
/* 0x000fca00000f1403 */
/*01e0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101904 */
/*01f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0200*/ BRA 0x200; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z30global_count_range_bins_kernelPiS_iii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0070*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x001fd400000001ff */
/*0080*/ IMAD.WIDE R2, R0, R5, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fcc00078e0205 */
/*0090*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1900 */
/*00a0*/ IMAD.MOV.U32 R9, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff097624 */
/* 0x000fe200078e00ff */
/*00b0*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */
/* 0x000fe20003800000 */
/*00c0*/ IMAD.WIDE R4, R0, R5, c[0x0][0x160] ; /* 0x0000580000047625 */
/* 0x000fc800078e0205 */
/*00d0*/ IMAD R0, R9, c[0x0][0xc], R0 ; /* 0x0000030009007a24 */
/* 0x000fe200078e0200 */
/*00e0*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x174], PT ; /* 0x00005d0002007a0c */
/* 0x004fc80003f06270 */
/*00f0*/ ISETP.LE.AND P0, PT, R2, c[0x0][0x178], P0 ; /* 0x00005e0002007a0c */
/* 0x000fc80000703270 */
/*0100*/ SEL R7, RZ, 0x1, !P0 ; /* 0x00000001ff077807 */
/* 0x000fe40004000000 */
/*0110*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fc60003f06270 */
/*0120*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x0001e8000c101904 */
/*0130*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0140*/ @!P0 BRA 0x70 ; /* 0xffffff2000008947 */
/* 0x000fea000383ffff */
/*0150*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0160*/ BRA 0x160; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z19shmem_reduce_kernelPiPKii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R0, R6, c[0x0][0x0], R7 ; /* 0x0000000006007a24 */
/* 0x001fc800078e0207 */
/*0060*/ IMAD.WIDE R2, R0, R3, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fcc00078e0203 */
/*0070*/ LDG.E R2, [R2.64] ; /* 0x0000000602027981 */
/* 0x000ea2000c1e1900 */
/*0080*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */
/* 0x000fe20000000800 */
/*0090*/ ISETP.NE.AND P1, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x000fe20003f25270 */
/*00a0*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */
/* 0x000fcc0008011604 */
/*00b0*/ ISETP.NE.AND P0, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */
/* 0x000fe2000bf05270 */
/*00c0*/ STS [R7.X4], R2 ; /* 0x0000000207007388 */
/* 0x0041e80000004800 */
/*00d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000ff00000010000 */
/*00e0*/ @!P0 BRA 0x1e0 ; /* 0x000000f000008947 */
/* 0x000fea0003800000 */
/*00f0*/ SHF.L.U32 R2, R7, 0x2, RZ ; /* 0x0000000207027819 */
/* 0x001fe200000006ff */
/*0100*/ IMAD.U32 R3, RZ, RZ, UR4 ; /* 0x00000004ff037e24 */
/* 0x000fca000f8e00ff */
/*0110*/ ISETP.GE.U32.AND P0, PT, R7, R3, PT ; /* 0x000000030700720c */
/* 0x000fe20003f06070 */
/*0120*/ IMAD.IADD R4, R0, 0x1, R3 ; /* 0x0000000100047824 */
/* 0x000fc600078e0203 */
/*0130*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x170], P0 ; /* 0x00005c0000007a0c */
/* 0x000fc80000706670 */
/*0140*/ ISETP.GE.U32.OR P0, PT, R4, c[0x0][0x170], P0 ; /* 0x00005c0004007a0c */
/* 0x000fda0000706470 */
/*0150*/ @!P0 LEA R4, R3, R2, 0x2 ; /* 0x0000000203048211 */
/* 0x000fe200078e10ff */
/*0160*/ @!P0 LDS R5, [R7.X4] ; /* 0x0000000007058984 */
/* 0x000fe20000004800 */
/*0170*/ SHF.R.U32.HI R3, RZ, 0x1, R3 ; /* 0x00000001ff037819 */
/* 0x000fc80000011603 */
/*0180*/ @!P0 LDS R4, [R4] ; /* 0x0000000004048984 */
/* 0x000e240000000800 */
/*0190*/ @!P0 IMAD.IADD R5, R5, 0x1, R4 ; /* 0x0000000105058824 */
/* 0x001fca00078e0204 */
/*01a0*/ @!P0 STS [R7.X4], R5 ; /* 0x0000000507008388 */
/* 0x0001e80000004800 */
/*01b0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*01c0*/ ISETP.NE.AND P0, PT, R3, RZ, PT ; /* 0x000000ff0300720c */
/* 0x000fda0003f05270 */
/*01d0*/ @P0 BRA 0x110 ; /* 0xffffff3000000947 */
/* 0x001fea000383ffff */
/*01e0*/ @P1 EXIT ; /* 0x000000000000194d */
/* 0x001fea0003800000 */
/*01f0*/ LDS R5, [RZ] ; /* 0x00000000ff057984 */
/* 0x000e220000000800 */
/*0200*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fd400000001ff */
/*0210*/ IMAD.WIDE.U32 R2, R6, R3, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x000fca00078e0003 */
/*0220*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101906 */
/*0230*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0240*/ BRA 0x240; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z20global_reduce_kernelPiS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R8, SR_CTAID.X ; /* 0x0000000000087919 */
/* 0x000e220000002500 */
/*0020*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */
/* 0x000fe20000000800 */
/*0030*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*0040*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */
/* 0x000fe20008011604 */
/*0050*/ S2R R9, SR_TID.X ; /* 0x0000000000097919 */
/* 0x000e220000002100 */
/*0060*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fc80000000a00 */
/*0070*/ ISETP.NE.AND P0, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */
/* 0x000fe2000bf05270 */
/*0080*/ IMAD R0, R8, c[0x0][0x0], R9 ; /* 0x0000000008007a24 */
/* 0x001fc800078e0209 */
/*0090*/ IMAD.WIDE R2, R0, R3, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fd000078e0203 */
/*00a0*/ @!P0 BRA 0x1c0 ; /* 0x0000011000008947 */
/* 0x000fea0003800000 */
/*00b0*/ ISETP.GE.U32.AND P0, PT, R9, UR4, PT ; /* 0x0000000409007c0c */
/* 0x000fe2000bf06070 */
/*00c0*/ BSSY B0, 0x180 ; /* 0x000000b000007945 */
/* 0x000fe20003800000 */
/*00d0*/ IADD3 R4, R0.reuse, UR4, RZ ; /* 0x0000000400047c10 */
/* 0x040fe4000fffe0ff */
/*00e0*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x170], P0 ; /* 0x00005c0000007a0c */
/* 0x000fc80000706670 */
/*00f0*/ ISETP.GE.U32.OR P0, PT, R4, c[0x0][0x170], P0 ; /* 0x00005c0004007a0c */
/* 0x000fda0000706470 */
/*0100*/ @P0 BRA 0x170 ; /* 0x0000006000000947 */
/* 0x001fea0003800000 */
/*0110*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0120*/ LDG.E R6, [R2.64] ; /* 0x0000000602067981 */
/* 0x000eb2000c1e1900 */
/*0130*/ IMAD.WIDE.U32 R4, R4, R5, c[0x0][0x168] ; /* 0x00005a0004047625 */
/* 0x000fcc00078e0005 */
/*0140*/ LDG.E R5, [R4.64] ; /* 0x0000000604057981 */
/* 0x000ea4000c1e1900 */
/*0150*/ IMAD.IADD R7, R6, 0x1, R5 ; /* 0x0000000106077824 */
/* 0x004fca00078e0205 */
/*0160*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x0001e4000c101906 */
/*0170*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0180*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */
/* 0x000fe20008011604 */
/*0190*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fea0000010000 */
/*01a0*/ ISETP.NE.AND P0, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */
/* 0x000fda000bf05270 */
/*01b0*/ @P0 BRA 0xb0 ; /* 0xfffffef000000947 */
/* 0x000fea000383ffff */
/*01c0*/ ISETP.NE.AND P0, PT, R9, RZ, PT ; /* 0x000000ff0900720c */
/* 0x000fda0003f05270 */
/*01d0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*01e0*/ LDG.E R3, [R2.64] ; /* 0x0000000602037981 */
/* 0x001ea2000c1e1900 */
/*01f0*/ MOV R5, 0x4 ; /* 0x0000000400057802 */
/* 0x000fca0000000f00 */
/*0200*/ IMAD.WIDE.U32 R4, R8, R5, c[0x0][0x160] ; /* 0x0000580008047625 */
/* 0x000fca00078e0005 */
/*0210*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */
/* 0x004fe2000c101906 */
/*0220*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0230*/ BRA 0x230; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | //Author: Manjari Pokala
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <cuda_runtime.h>
//Reading array A from input file inp.txt
// Growable array of ints; simple dynamic-array bookkeeping used by
// initArray/insertArray/initArrayA below.
typedef struct {
int *array;   // heap buffer holding the elements
size_t used;  // number of elements currently stored
size_t size;  // allocated capacity, in elements
} Array;
// Initializes a dynamic int array with an initial capacity.
// a: descriptor to initialize; initialSize: starting capacity in elements.
// Fix: the malloc result was previously unchecked; on allocation failure
// we now report and exit instead of crashing later on a NULL buffer.
void initArray(Array *a, size_t initialSize) {
  a->array = (int*) malloc(initialSize * sizeof(int));
  if (a->array == NULL && initialSize > 0) {
    fprintf(stderr, "initArray: out of memory\n");
    exit(EXIT_FAILURE);
  }
  a->used = 0;
  a->size = initialSize;
}
// Appends one element, growing the buffer when full.
// Growth is deliberately one element at a time: callers rely on
// a->size == a->used after a run of inserts (main uses A.size as the
// element count), so do NOT switch to geometric growth without auditing them.
// Fix: the realloc result was previously assigned straight back to
// a->array, leaking the old buffer and dereferencing NULL on failure.
void insertArray(Array *a, int element) {
  if (a->used == a->size) {
    a->size += 1;
    int *grown = (int*) realloc(a->array, a->size * sizeof(int));
    if (grown == NULL) {
      fprintf(stderr, "insertArray: out of memory\n");
      free(a->array);
      exit(EXIT_FAILURE);
    }
    a->array = grown;
  }
  a->array[a->used++] = element;
}
// Reads whitespace/comma-separated integers from "inp.txt" into a new Array.
// Returns the populated Array; if the file cannot be opened, prints "error"
// and returns the freshly initialized (empty, capacity-1) array.
// NOTE(review): uses strtok_r, declared in <string.h> — verify that header
// is included at the top of the file.
Array initArrayA(){
FILE *fp;
char str[50000];
Array a;
initArray(&a, 1);
/* opening file for reading */
fp = fopen("inp.txt" , "r");
if(fp == NULL) {
printf("%s","error");
return a;
}
while( fgets (str, 50000, fp)!=NULL ) {
/* writing content to stdout */
// printf("%s\n", str);
char* token;
// strtok_r advances `rest` past each token; delimiter set is {space, comma}
char* rest = str;
while ((token = strtok_r(rest, " , ", &rest)))
insertArray(&a, atoi(token));
}
fclose(fp);
return a;
}
//Asserts for GPU errors
#define gpuErrorCheck(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a CUDA runtime error with its source location.
// Fix: diagnostics now go to stderr (was stdout) so they remain visible
// when stdout is redirected; aborts with the error code when abort==true.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
   if (code != cudaSuccess)
   {
      fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
      if (abort) exit(code);
   }
}
// In-place tree reduction over global memory: each block sums its slice of
// d_in (clobbering it) and thread 0 writes the block's partial sum to
// d_out[blockIdx.x]. Lanes whose partner index falls past `size` are
// skipped, so non-power-of-2 sizes are handled.
__global__ void global_reduce_kernel(int * d_out, int * d_in, int size)
{
    const int lane = threadIdx.x;
    const int gid  = blockDim.x * blockIdx.x + lane;
    unsigned int half = blockDim.x / 2;
    while (half > 0)
    {
        const bool active = (lane < half) && (gid < size) && (gid + half < size);
        if (active)
        {
            d_in[gid] = d_in[gid] + d_in[gid + half];
        }
        __syncthreads(); // all partial sums of this stage must land first
        half >>= 1;
    }
    // lane 0 publishes this block's result
    if (lane == 0)
    {
        d_out[blockIdx.x] = d_in[gid];
    }
}
// Shared-memory tree reduction: each block loads its slice of d_in into
// dynamic shared memory (3rd launch argument must be blockDim.x*sizeof(int)),
// reduces it, and thread 0 writes the block's partial sum to d_out[blockIdx.x].
__global__ void shmem_reduce_kernel(int * d_out, const int * d_in, int size)
{
    // sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>>
    extern __shared__ int sdata[];
    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    int tid = threadIdx.x;
    // load shared mem from global mem
    // fix: guard the load — tail threads of the last block previously read
    // d_in out of bounds; 0 is the additive identity, so it is a safe filler
    sdata[tid] = (myId < size) ? d_in[myId] : 0;
    __syncthreads(); // make sure entire block is loaded!
    // do reduction in shared mem
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
    {
        if (tid < s && myId < size && myId+s < size)
        {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads(); // make sure all adds at one stage are done!
    }
    // only thread 0 writes result for this block back to global mem
    if (tid == 0)
    {
        d_out[blockIdx.x] = sdata[0];
    }
}
//kernel to create an intermediate array corresponding to given bins of array B
// Writes a 0/1 flag array: d_bins[i] = 1 iff d_binsin[i] lies in [x, y].
// Uses a grid-stride loop so any grid size covers the whole array.
__global__ void global_count_range_bins_kernel(int * d_bins, int * d_binsin, int size, int x, int y)
{
    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    // stride is the total number of threads in the grid
    // Using stride increases the performance and benefits with scalability & thread reusage
    int stride = blockDim.x * gridDim.x;
    // assign flags in global memory
    for (; myId < size; myId += stride)
    {
        if (d_binsin[myId] >= x && d_binsin[myId] <= y) {
            d_bins[myId] = 1;
        } else {
            d_bins[myId] = 0;
        }
        // fix: removed the __syncthreads() that used to sit here. Threads
        // make different numbers of loop trips when size is not a multiple
        // of the grid size, so a barrier inside this loop is divergent
        // (undefined behavior). No thread reads another thread's d_bins
        // entry, so no barrier is needed at all.
    }
}
//kernel to perform parallel prefix sum
//assumes only 1 block (1 block can be utilized since we have only 10 elements)
// Inclusive (Hillis-Steele style) prefix sum over a single block.
// Launch as <<<1, n, n * sizeof(int)>>> so the dynamic shared buffer
// holds one int per thread.
__global__ void prefixsum(int *d_out, int * d_in, int size)
{
    extern __shared__ int sh_mem[];
    int tid = threadIdx.x;
    int myId = blockIdx.x * blockDim.x + threadIdx.x;
    // fix: guard the load so threads past the end do not read out of bounds
    if (myId < size)
        sh_mem[tid] = d_in[myId];
    __syncthreads();
    for (int d = 1; d < blockDim.x; d *= 2)
    {
        // fix: two-phase update removes the read/write race of the old
        // "sh_mem[tid] += sh_mem[tid - d]" (a lane could read a slot that
        // another lane was overwriting in the same step). All threads now
        // reach both barriers unconditionally, so the barriers are no
        // longer inside divergent control flow.
        int addend = 0;
        if (tid >= d)
            addend = sh_mem[tid - d];
        __syncthreads();
        if (tid >= d)
            sh_mem[tid] += addend;
        __syncthreads();
    }
    // fix: guard the store too (old code wrote d_out[myId] unconditionally)
    if (myId < size)
        d_out[myId] = sh_mem[tid];
}
//Function to call corresponding kernel based on memory usage
// Two-stage sum reduction of d_in (length `size`) into *d_out.
// Stage 1 produces one partial sum per block in d_intermediate; stage 2
// reduces those partials with a single block (assumes size small enough
// that stage 1 yields at most maxThreadsPerBlock blocks). The
// global-memory path clobbers d_in; d_intermediate must hold >= blocks ints.
void reduce(int * d_out, int * d_intermediate, int * d_in,
            int size, bool usesSharedMemory)
{
    const int maxThreadsPerBlock = 512;
    int threads = maxThreadsPerBlock;
    // handles non power of 2 arrays
    int blocks = ceil(float(size) / float(maxThreadsPerBlock));
    if (usesSharedMemory)
    {
        shmem_reduce_kernel<<<blocks, threads, threads * sizeof(int)>>>
            (d_intermediate, d_in, size);
        // fix: launch errors were only checked in the global-memory branch;
        // peek (non-clearing, non-blocking) so bad configs are not silent
        // and the caller's event timing is not perturbed by a sync.
        gpuErrorCheck( cudaPeekAtLastError() );
    }
    else
    {
        global_reduce_kernel<<<blocks, threads>>>
            (d_intermediate, d_in, size);
        gpuErrorCheck( cudaPeekAtLastError() );
        gpuErrorCheck( cudaDeviceSynchronize() );
    }
    // now we're down to one block left, so reduce it
    threads = blocks; // launch one thread for each block in prev step
    blocks = 1;
    if (usesSharedMemory)
    {
        shmem_reduce_kernel<<<blocks, threads, threads * sizeof(int)>>>
            (d_out, d_intermediate, size);
    }
    else
    {
        global_reduce_kernel<<<blocks, threads>>>
            (d_out, d_intermediate, size);
    }
    // fix: check the second-stage launch too (previously unchecked)
    gpuErrorCheck( cudaPeekAtLastError() );
}
int main(int argc, char **argv)
{
FILE *q2a;
FILE *q2b;
FILE *q2c;
q2a = fopen("q2a.txt", "w");
q2b = fopen("q2b.txt", "w");
q2c = fopen("q2c.txt", "w");
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(q2a, "error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
int dev = 0;
cudaSetDevice(dev);
cudaDeviceProp devProps;
if (cudaGetDeviceProperties(&devProps, dev) == 0)
{
fprintf(q2a, "Using device %d:\n", dev);
fprintf(q2a, "%s; global mem: %dB; compute v%d.%d; clock: %d kHz\n",
devProps.name, (int)devProps.totalGlobalMem,
(int)devProps.major, (int)devProps.minor,
(int)devProps.clockRate);
}
// generate the input array on the host
Array A = initArrayA();
int * h_in = A.array;
const int ARRAY_SIZE = A.size;
const int ARRAY_BYTES = A.size * sizeof(int);
fprintf(q2a, "array size is %d\n", ARRAY_SIZE);
// declare GPU memory pointers
int * d_in, * d_intermediate, * d_out, * d_bins, *d_binsin, *prefix_out, *prefix_in;
// allocate GPU memory
cudaMalloc((void **) &d_in, ARRAY_BYTES);
cudaMalloc((void **) &d_binsin, ARRAY_BYTES);
cudaMalloc((void **) &d_intermediate, ARRAY_BYTES); // overallocated
cudaMalloc((void **) &d_bins, ARRAY_BYTES);
cudaMalloc((void **) &d_out, sizeof(int));
// allocate memory for prefix sum, it has only 10 buckets
cudaMalloc((void **) &prefix_out, 10*sizeof(int));
cudaMalloc((void **) &prefix_in, 10*sizeof(int));
const int maxThreadsPerBlock = 512;
int threads = maxThreadsPerBlock;
// handles non power of 2 arrays
int blocks = ceil(float(ARRAY_SIZE) / float(maxThreadsPerBlock));
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float elapsedTime;
//Problem 2a - Using Global Memory to get counts(from Reduction)
fprintf(q2a,"Using Global Memory to get counts(from Reduction\n");
// copy back the bin counts from GPU
int b[10];
// transfer the input array to the GPU
gpuErrorCheck( cudaMemcpy(d_binsin, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice));
//fprintf(q2a, "Running global count\n");
//Bin 1
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 0, 99);
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
gpuErrorCheck( cudaPeekAtLastError() );
gpuErrorCheck( cudaDeviceSynchronize() );
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 1 - average time elapsed using global memory: %f\n", elapsedTime);
cudaMemcpy(&b[0], d_out, sizeof(int), cudaMemcpyDeviceToHost);
//Bin 2
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 100, 199);
gpuErrorCheck( cudaPeekAtLastError() );
gpuErrorCheck( cudaDeviceSynchronize() );
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 2 - average time elapsed using global memory: %f\n", elapsedTime);
cudaMemcpy(&b[1], d_out, sizeof(int), cudaMemcpyDeviceToHost);
//Bin 3
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 200, 299);
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 3 - average time elapsed using global memory: %f\n", elapsedTime);
cudaMemcpy(&b[2], d_out, sizeof(int), cudaMemcpyDeviceToHost);
//Bin 4
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 300, 399);
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 4 - average time elapsed using global memory: %f\n", elapsedTime);
cudaMemcpy(&b[3], d_out, sizeof(int), cudaMemcpyDeviceToHost);
//Bin 5
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 400, 499);
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 5 - average time elapsed using global memory: %f\n", elapsedTime);
cudaMemcpy(&b[4], d_out, sizeof(int), cudaMemcpyDeviceToHost);
//Bin 6
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 500, 599);
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 6 - average time elapsed using global memory: %f\n", elapsedTime);
cudaMemcpy(&b[5], d_out, sizeof(int), cudaMemcpyDeviceToHost);
//Bin 7
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 600, 699);
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 7 - average time elapsed using global memory: %f\n", elapsedTime);
cudaMemcpy(&b[6], d_out, sizeof(int), cudaMemcpyDeviceToHost);
//Bin 8
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 700, 799);
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 8 - average time elapsed using global memory: %f\n", elapsedTime);
cudaMemcpy(&b[7], d_out, sizeof(int), cudaMemcpyDeviceToHost);
//Bin 9
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 800, 899);
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 9 - average time elapsed using global memory: %f\n", elapsedTime);
cudaMemcpy(&b[8], d_out, sizeof(int), cudaMemcpyDeviceToHost);
//Bin 10
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 900, 999);
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 10 - average time elapsed using global memory: %f\n", elapsedTime);
cudaMemcpy(&b[9], d_out, sizeof(int), cudaMemcpyDeviceToHost);
for(int i = 0; i < 10; i++) {
fprintf(q2a, "Global Memory - count returned by device: %d\n", b[i]);
}
//Problem 2b - Using Shared Memory to get counts(from Reduction)
fprintf(q2b,"Using Shared Memory to get counts(from Reduction\n");
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(q2b, "error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
dev = 0;
cudaSetDevice(dev);
if (cudaGetDeviceProperties(&devProps, dev) == 0)
{
fprintf(q2b, "Using device %d:\n", dev);
fprintf(q2b, "%s; global mem: %dB; compute v%d.%d; clock: %d kHz\n",
devProps.name, (int)devProps.totalGlobalMem,
(int)devProps.major, (int)devProps.minor,
(int)devProps.clockRate);
}
fprintf(q2b,"array size is %d\n", ARRAY_SIZE);
// copy back the bin counts from GPU
int s[10];
// transfer the input array to the GPU
cudaMemcpy(d_binsin, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
//fprintf(q2b, "Running shared count\n");
//Bin 1
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 0, 99);
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 1 - average time elapsed using shared memory: %f\n", elapsedTime);
cudaMemcpy(&s[0], d_out, sizeof(int), cudaMemcpyDeviceToHost);
//Bin 2
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 100, 199);
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 2 - average time elapsed using shared memory: %f\n", elapsedTime);
cudaMemcpy(&s[1], d_out, sizeof(int), cudaMemcpyDeviceToHost);
//Bin 3
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 200, 299);
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 3 - average time elapsed using shared memory: %f\n", elapsedTime);
cudaMemcpy(&s[2], d_out, sizeof(int), cudaMemcpyDeviceToHost);
//Bin 4
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 300, 399);
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 4 - average time elapsed using shared memory: %f\n", elapsedTime);
cudaMemcpy(&s[3], d_out, sizeof(int), cudaMemcpyDeviceToHost);
//Bin 5
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 400, 499);
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 5 - average time elapsed using shared memory: %f\n", elapsedTime);
cudaMemcpy(&s[4], d_out, sizeof(int), cudaMemcpyDeviceToHost);
//Bin 6
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 500, 599);
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 6 - average time elapsed using shared memory: %f\n", elapsedTime);
cudaMemcpy(&s[5], d_out, sizeof(int), cudaMemcpyDeviceToHost);
//Bin 7
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 600, 699);
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 7 - average time elapsed using shared memory: %f\n", elapsedTime);
cudaMemcpy(&s[6], d_out, sizeof(int), cudaMemcpyDeviceToHost);
//Bin 8
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 700, 799);
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 8 - average time elapsed using shared memory: %f\n", elapsedTime);
cudaMemcpy(&s[7], d_out, sizeof(int), cudaMemcpyDeviceToHost);
//Bin 9
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 800, 899);
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 9 - average time elapsed using shared memory: %f\n", elapsedTime);
cudaMemcpy(&s[8], d_out, sizeof(int), cudaMemcpyDeviceToHost);
//Bin 10
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 900, 999);
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 10 - average time elapsed using shared memory: %f\n", elapsedTime);
cudaMemcpy(&s[9], d_out, sizeof(int), cudaMemcpyDeviceToHost);
for(int i = 0; i < 10; i++) {
fprintf(q2b, "Shared Memory - count returned by device: %d\n", s[i]);
}
//Problem 2c - Using Parallel Prefix Scan
fprintf(q2c,"Using Parallel Prefix Scan to generate C\n");
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(q2c, "error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
dev = 0;
cudaSetDevice(dev);
if (cudaGetDeviceProperties(&devProps, dev) == 0)
{
fprintf(q2c, "Using device %d:\n", dev);
fprintf(q2c, "%s; global mem: %dB; compute v%d.%d; clock: %d kHz\n",
devProps.name, (int)devProps.totalGlobalMem,
(int)devProps.major, (int)devProps.minor,
(int)devProps.clockRate);
}
// transfer the input scan array to the GPU
cudaMemcpy(prefix_in, b, 10 * sizeof(int), cudaMemcpyHostToDevice);
cudaEventRecord(start, 0);
prefixsum<<<1, 10, 10 * sizeof(int)>>>(prefix_out, prefix_in, 10);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
gpuErrorCheck( cudaPeekAtLastError() );
gpuErrorCheck( cudaDeviceSynchronize() );
// copy back the prefix sum from GPU
int c[10];
cudaMemcpy(&c, prefix_out, 10*sizeof(int), cudaMemcpyDeviceToHost);
fprintf(q2c, "Prefix Sum - average time elapsed: %f\n", elapsedTime);
for(int i = 0; i < 10; i++) {
fprintf(q2c, "Prefix Sum returned by device: %d\n", c[i]);
}
// free GPU memory allocation
cudaFree(d_in);
cudaFree(d_intermediate);
cudaFree(d_out);
cudaFree(d_bins);
cudaFree(d_binsin);
cudaFree(prefix_out);
cudaFree(prefix_in);
return 0;
}
// Reference: https://github.com/manjaripokala/sum20-Parallel-algs/blob/master/cuda-examples/reduce.cu | //Author: Manjari Pokala
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <assert.h>
#include <math.h>
//Reading array A from input file inp.txt
// Growable array of ints read from the input file.
typedef struct {
	int *array;   // heap-allocated element storage
	size_t used;  // number of elements currently stored
	size_t size;  // allocated capacity, in elements (see insertArray's growth policy)
} Array;
// Allocates backing storage for `a` with capacity `initialSize` ints
// and marks the array empty. malloc failure is not checked (as elsewhere
// in this file).
void initArray(Array *a, size_t initialSize) {
	a->used = 0;
	a->size = initialSize;
	a->array = (int*) malloc(a->size * sizeof(int));
}
// Appends `element` to `a`, growing the buffer when it is full.
// NOTE(review): capacity grows by exactly one element per realloc, which is
// O(n^2) overall. However, main() later reads a->size as the element count
// (after the first insertion this +1 policy keeps size == used), so switching
// to geometric growth would require callers to use `used` instead — TODO
// confirm before changing. realloc failure is not checked.
void insertArray(Array *a, int element) {
	if (a->used == a->size) {
		a->size += 1;
		a->array =(int*) realloc(a->array, a->size * sizeof(int));
	}
	a->array[a->used++] = element;
}
// Reads integers from "inp.txt" into a freshly initialised Array and returns it.
// On open failure it prints "error" and returns the empty array; callers cannot
// distinguish that from an empty input file.
Array initArrayA(){
	FILE *fp;
	char str[50000];   // one input line; longer lines are split by fgets
	Array a;
	initArray(&a, 1);
	/* opening file for reading */
	fp = fopen("inp.txt" , "r");
	if(fp == NULL) {
		printf("%s","error");
		return a;
	}
	while( fgets (str, 50000, fp)!=NULL ) {
		/* writing content to stdout */
		// printf("%s\n", str);
		char* token;
		char* rest = str;
		// strtok_r treats " , " as a SET of delimiter characters (space and
		// comma), so any run of spaces/commas separates two numbers; `rest`
		// doubles as the save pointer and walks forward through the line.
		while ((token = strtok_r(rest, " , ", &rest)))
			insertArray(&a, atoi(token));
	}
	fclose(fp);
	return a;
}
//Asserts for GPU errors
#define gpuErrorCheck(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a HIP error with its source location (used via the gpuErrorCheck
// macro); terminates the process with the error code unless `abort` is false.
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
	if (code == hipSuccess)
		return;
	printf("GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
	if (abort)
		exit(code);
}
// In-place tree reduction over global memory: each block sums its slice of
// d_in (bounded by `size`) into its first element, and thread 0 publishes it
// to d_out[blockIdx.x]. d_in is clobbered.
// Fix vs. previous version: ceil-halving handles any element/thread count —
// the old power-of-two halving silently dropped tail elements whenever the
// second reduction stage was launched with a non-power-of-two thread count,
// and the final read was unguarded for blocks entirely past `size`.
__global__ void global_reduce_kernel(int * d_out, int * d_in, int size)
{
	int tid  = threadIdx.x;
	int base = blockDim.x * blockIdx.x;
	// number of valid elements owned by this block; uniform across the block,
	// so every thread reaches the barrier below the same number of times
	unsigned int active = 0;
	if (base < size)
	{
		int remaining = size - base;
		active = (unsigned int)((remaining < (int)blockDim.x) ? remaining : (int)blockDim.x);
	}
	while (active > 1)
	{
		unsigned int half = (active + 1) / 2;   // ceil(active/2) keeps odd tails
		if ((unsigned int)tid < active - half)
		{
			d_in[base + tid] += d_in[base + tid + half];
		}
		__syncthreads(); // make sure all adds at one stage are done!
		active = half;
	}
	// only thread 0 writes result for this block back to global mem
	if (tid == 0 && base < size)
	{
		d_out[blockIdx.x] = d_in[base];
	}
}
// Block-level tree reduction in shared memory; requires blockDim.x*sizeof(int)
// dynamic shared memory (3rd launch argument). Each block writes its partial
// sum to d_out[blockIdx.x].
// Fixes vs. previous version: the shared-memory load is now bounds-checked and
// zero-padded (the old code read d_in past `size` whenever the grid was larger
// than the data), and ceil-halving makes the reduction correct for any
// blockDim.x, not just powers of two (the old halving dropped tail elements).
__global__ void shmem_reduce_kernel(int * d_out, const int * d_in, int size)
{
	// sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>>
	extern __shared__ int sdata[];
	int myId = threadIdx.x + blockDim.x * blockIdx.x;
	int tid = threadIdx.x;
	// load shared mem from global mem, zero-padding lanes past the end
	sdata[tid] = (myId < size) ? d_in[myId] : 0;
	__syncthreads(); // make sure entire block is loaded!
	// do reduction in shared mem (ceil-halving: works for any blockDim.x)
	unsigned int active = blockDim.x;
	while (active > 1)
	{
		unsigned int half = (active + 1) / 2;
		if ((unsigned int)tid < active - half)
		{
			sdata[tid] += sdata[tid + half];
		}
		__syncthreads(); // make sure all adds at one stage are done!
		active = half;
	}
	// only thread 0 writes result for this block back to global mem
	if (tid == 0)
	{
		d_out[blockIdx.x] = sdata[0];
	}
}
//kernel to create an intermediate array corresponding to given bins of array B
__global__ void global_count_range_bins_kernel(int * d_bins, int * d_binsin, int size, int x, int y)
{
int myId = threadIdx.x + blockDim.x * blockIdx.x;
// stride is the total number of threads in the grid
// Using stride increases the performance and benefits with scalability & thread reusage
int stride = blockDim.x * gridDim.x;
// assign flags in global memory
for (; myId < size; myId += stride)
{
if (d_binsin[myId] >= x && d_binsin[myId] <= y) {
d_bins[myId] = 1;
} else {
d_bins[myId] = 0;
}
__syncthreads(); // make sure all adds at one stage are done!
}
}
//kernel to perform parallel prefix sum
//assumes only 1 block (1 block can be utilized since we have only 10 elements)
//kernel to perform parallel inclusive prefix sum (Hillis-Steele)
//assumes only 1 block with blockDim.x >= size, and blockDim.x*sizeof(int)
//dynamic shared memory (3rd launch argument).
// Fixes vs. previous version: each stage now reads its partner value before
// any thread writes (the old `sh_mem[tid] += sh_mem[tid - d]` raced the write
// of sh_mem[tid - d] in the same stage), barriers no longer sit inside the
// divergent `if (myId < size)` branch, and the load/store are bounds-checked.
__global__ void prefixsum(int *d_out, int * d_in, int size)
{
	extern __shared__ int sh_mem[];
	int tid = threadIdx.x;
	int myId = blockIdx.x * blockDim.x + threadIdx.x;
	sh_mem[tid] = (myId < size) ? d_in[myId] : 0;   // zero-pad idle lanes
	__syncthreads();
	for (int d = 1; d < blockDim.x; d *= 2)
	{
		// phase 1: every thread reads its partner value ...
		int addend = (tid >= d) ? sh_mem[tid - d] : 0;
		__syncthreads();
		// phase 2: ... then writes, once all reads of this stage are done
		sh_mem[tid] += addend;
		__syncthreads();
	}
	if (myId < size)
	{
		d_out[myId] = sh_mem[tid];
	}
}
//Function to call corresponding kernel based on memory usage
void reduce(int * d_out, int * d_intermediate, int * d_in,
int size, bool usesSharedMemory)
{
const int maxThreadsPerBlock = 512;
int threads = maxThreadsPerBlock;
// handles non power of 2 arrays
int blocks = ceil(float(size) / float(maxThreadsPerBlock));
if (usesSharedMemory)
{
shmem_reduce_kernel<<<blocks, threads, threads * sizeof(int)>>>
(d_intermediate, d_in, size);
}
else
{
global_reduce_kernel<<<blocks, threads>>>
(d_intermediate, d_in, size);
gpuErrorCheck( hipPeekAtLastError() );
gpuErrorCheck( hipDeviceSynchronize() );
}
// now we're down to one block left, so reduce it
threads = blocks; // launch one thread for each block in prev step
blocks = 1;
if (usesSharedMemory)
{
shmem_reduce_kernel<<<blocks, threads, threads * sizeof(int)>>>
(d_out, d_intermediate, size);
}
else
{
global_reduce_kernel<<<blocks, threads>>>
(d_out, d_intermediate, size);
}
}
int main(int argc, char **argv)
{
FILE *q2a;
FILE *q2b;
FILE *q2c;
q2a = fopen("q2a.txt", "w");
q2b = fopen("q2b.txt", "w");
q2c = fopen("q2c.txt", "w");
int deviceCount;
hipGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(q2a, "error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
int dev = 0;
hipSetDevice(dev);
hipDeviceProp_t devProps;
if (hipGetDeviceProperties(&devProps, dev) == 0)
{
fprintf(q2a, "Using device %d:\n", dev);
fprintf(q2a, "%s; global mem: %dB; compute v%d.%d; clock: %d kHz\n",
devProps.name, (int)devProps.totalGlobalMem,
(int)devProps.major, (int)devProps.minor,
(int)devProps.clockRate);
}
// generate the input array on the host
Array A = initArrayA();
int * h_in = A.array;
const int ARRAY_SIZE = A.size;
const int ARRAY_BYTES = A.size * sizeof(int);
fprintf(q2a, "array size is %d\n", ARRAY_SIZE);
// declare GPU memory pointers
int * d_in, * d_intermediate, * d_out, * d_bins, *d_binsin, *prefix_out, *prefix_in;
// allocate GPU memory
hipMalloc((void **) &d_in, ARRAY_BYTES);
hipMalloc((void **) &d_binsin, ARRAY_BYTES);
hipMalloc((void **) &d_intermediate, ARRAY_BYTES); // overallocated
hipMalloc((void **) &d_bins, ARRAY_BYTES);
hipMalloc((void **) &d_out, sizeof(int));
// allocate memory for prefix sum, it has only 10 buckets
hipMalloc((void **) &prefix_out, 10*sizeof(int));
hipMalloc((void **) &prefix_in, 10*sizeof(int));
const int maxThreadsPerBlock = 512;
int threads = maxThreadsPerBlock;
// handles non power of 2 arrays
int blocks = ceil(float(ARRAY_SIZE) / float(maxThreadsPerBlock));
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float elapsedTime;
//Problem 2a - Using Global Memory to get counts(from Reduction)
fprintf(q2a,"Using Global Memory to get counts(from Reduction\n");
// copy back the bin counts from GPU
int b[10];
// transfer the input array to the GPU
gpuErrorCheck( hipMemcpy(d_binsin, h_in, ARRAY_BYTES, hipMemcpyHostToDevice));
//fprintf(q2a, "Running global count\n");
//Bin 1
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 0, 99);
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
gpuErrorCheck( hipPeekAtLastError() );
gpuErrorCheck( hipDeviceSynchronize() );
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 1 - average time elapsed using global memory: %f\n", elapsedTime);
hipMemcpy(&b[0], d_out, sizeof(int), hipMemcpyDeviceToHost);
//Bin 2
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 100, 199);
gpuErrorCheck( hipPeekAtLastError() );
gpuErrorCheck( hipDeviceSynchronize() );
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 2 - average time elapsed using global memory: %f\n", elapsedTime);
hipMemcpy(&b[1], d_out, sizeof(int), hipMemcpyDeviceToHost);
//Bin 3
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 200, 299);
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 3 - average time elapsed using global memory: %f\n", elapsedTime);
hipMemcpy(&b[2], d_out, sizeof(int), hipMemcpyDeviceToHost);
//Bin 4
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 300, 399);
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 4 - average time elapsed using global memory: %f\n", elapsedTime);
hipMemcpy(&b[3], d_out, sizeof(int), hipMemcpyDeviceToHost);
//Bin 5
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 400, 499);
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 5 - average time elapsed using global memory: %f\n", elapsedTime);
hipMemcpy(&b[4], d_out, sizeof(int), hipMemcpyDeviceToHost);
//Bin 6
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 500, 599);
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 6 - average time elapsed using global memory: %f\n", elapsedTime);
hipMemcpy(&b[5], d_out, sizeof(int), hipMemcpyDeviceToHost);
//Bin 7
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 600, 699);
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 7 - average time elapsed using global memory: %f\n", elapsedTime);
hipMemcpy(&b[6], d_out, sizeof(int), hipMemcpyDeviceToHost);
//Bin 8
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 700, 799);
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 8 - average time elapsed using global memory: %f\n", elapsedTime);
hipMemcpy(&b[7], d_out, sizeof(int), hipMemcpyDeviceToHost);
//Bin 9
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 800, 899);
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 9 - average time elapsed using global memory: %f\n", elapsedTime);
hipMemcpy(&b[8], d_out, sizeof(int), hipMemcpyDeviceToHost);
//Bin 10
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 900, 999);
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 10 - average time elapsed using global memory: %f\n", elapsedTime);
hipMemcpy(&b[9], d_out, sizeof(int), hipMemcpyDeviceToHost);
for(int i = 0; i < 10; i++) {
fprintf(q2a, "Global Memory - count returned by device: %d\n", b[i]);
}
//Problem 2b - Using Shared Memory to get counts(from Reduction)
fprintf(q2b,"Using Shared Memory to get counts(from Reduction\n");
hipGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(q2b, "error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
dev = 0;
hipSetDevice(dev);
if (hipGetDeviceProperties(&devProps, dev) == 0)
{
fprintf(q2b, "Using device %d:\n", dev);
fprintf(q2b, "%s; global mem: %dB; compute v%d.%d; clock: %d kHz\n",
devProps.name, (int)devProps.totalGlobalMem,
(int)devProps.major, (int)devProps.minor,
(int)devProps.clockRate);
}
fprintf(q2b,"array size is %d\n", ARRAY_SIZE);
// copy back the bin counts from GPU
int s[10];
// transfer the input array to the GPU
hipMemcpy(d_binsin, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
//fprintf(q2b, "Running shared count\n");
//Bin 1
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 0, 99);
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 1 - average time elapsed using shared memory: %f\n", elapsedTime);
hipMemcpy(&s[0], d_out, sizeof(int), hipMemcpyDeviceToHost);
//Bin 2
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 100, 199);
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 2 - average time elapsed using shared memory: %f\n", elapsedTime);
hipMemcpy(&s[1], d_out, sizeof(int), hipMemcpyDeviceToHost);
//Bin 3
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 200, 299);
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 3 - average time elapsed using shared memory: %f\n", elapsedTime);
hipMemcpy(&s[2], d_out, sizeof(int), hipMemcpyDeviceToHost);
//Bin 4
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 300, 399);
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 4 - average time elapsed using shared memory: %f\n", elapsedTime);
hipMemcpy(&s[3], d_out, sizeof(int), hipMemcpyDeviceToHost);
//Bin 5
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 400, 499);
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 5 - average time elapsed using shared memory: %f\n", elapsedTime);
hipMemcpy(&s[4], d_out, sizeof(int), hipMemcpyDeviceToHost);
//Bin 6
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 500, 599);
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 6 - average time elapsed using shared memory: %f\n", elapsedTime);
hipMemcpy(&s[5], d_out, sizeof(int), hipMemcpyDeviceToHost);
//Bin 7
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 600, 699);
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 7 - average time elapsed using shared memory: %f\n", elapsedTime);
hipMemcpy(&s[6], d_out, sizeof(int), hipMemcpyDeviceToHost);
//Bin 8
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 700, 799);
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 8 - average time elapsed using shared memory: %f\n", elapsedTime);
hipMemcpy(&s[7], d_out, sizeof(int), hipMemcpyDeviceToHost);
//Bin 9
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 800, 899);
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 9 - average time elapsed using shared memory: %f\n", elapsedTime);
hipMemcpy(&s[8], d_out, sizeof(int), hipMemcpyDeviceToHost);
//Bin 10
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 900, 999);
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 10 - average time elapsed using shared memory: %f\n", elapsedTime);
hipMemcpy(&s[9], d_out, sizeof(int), hipMemcpyDeviceToHost);
for(int i = 0; i < 10; i++) {
fprintf(q2b, "Shared Memory - count returned by device: %d\n", s[i]);
}
//Problem 2c - Using Parallel Prefix Scan
fprintf(q2c,"Using Parallel Prefix Scan to generate C\n");
hipGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(q2c, "error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
dev = 0;
hipSetDevice(dev);
if (hipGetDeviceProperties(&devProps, dev) == 0)
{
fprintf(q2c, "Using device %d:\n", dev);
fprintf(q2c, "%s; global mem: %dB; compute v%d.%d; clock: %d kHz\n",
devProps.name, (int)devProps.totalGlobalMem,
(int)devProps.major, (int)devProps.minor,
(int)devProps.clockRate);
}
// transfer the input scan array to the GPU
hipMemcpy(prefix_in, b, 10 * sizeof(int), hipMemcpyHostToDevice);
hipEventRecord(start, 0);
prefixsum<<<1, 10, 10 * sizeof(int)>>>(prefix_out, prefix_in, 10);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
gpuErrorCheck( hipPeekAtLastError() );
gpuErrorCheck( hipDeviceSynchronize() );
// copy back the prefix sum from GPU
int c[10];
hipMemcpy(&c, prefix_out, 10*sizeof(int), hipMemcpyDeviceToHost);
fprintf(q2c, "Prefix Sum - average time elapsed: %f\n", elapsedTime);
for(int i = 0; i < 10; i++) {
fprintf(q2c, "Prefix Sum returned by device: %d\n", c[i]);
}
// free GPU memory allocation
hipFree(d_in);
hipFree(d_intermediate);
hipFree(d_out);
hipFree(d_bins);
hipFree(d_binsin);
hipFree(prefix_out);
hipFree(prefix_in);
return 0;
}
// Reference: https://github.com/manjaripokala/sum20-Parallel-algs/blob/master/cuda-examples/reduce.cu |
// NOTE: stray non-code text from a file-concatenation artifact, commented out so it cannot break compilation:
// You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <assert.h>
#include <math.h>
//Reading array A from input file inp.txt
// Growable array of ints read from the input file.
typedef struct {
	int *array;   // heap-allocated element storage
	size_t used;  // number of elements currently stored
	size_t size;  // allocated capacity, in elements (see insertArray's growth policy)
} Array;
// Allocates backing storage for `a` with capacity `initialSize` ints
// and marks the array empty. malloc failure is not checked (as elsewhere
// in this file).
void initArray(Array *a, size_t initialSize) {
	a->used = 0;
	a->size = initialSize;
	a->array = (int*) malloc(a->size * sizeof(int));
}
// Appends `element` to `a`, growing the buffer when it is full.
// NOTE(review): capacity grows by exactly one element per realloc, which is
// O(n^2) overall. However, main() later reads a->size as the element count
// (after the first insertion this +1 policy keeps size == used), so switching
// to geometric growth would require callers to use `used` instead — TODO
// confirm before changing. realloc failure is not checked.
void insertArray(Array *a, int element) {
	if (a->used == a->size) {
		a->size += 1;
		a->array =(int*) realloc(a->array, a->size * sizeof(int));
	}
	a->array[a->used++] = element;
}
// Reads integers from "inp.txt" into a freshly initialised Array and returns it.
// On open failure it prints "error" and returns the empty array; callers cannot
// distinguish that from an empty input file.
Array initArrayA(){
	FILE *fp;
	char str[50000];   // one input line; longer lines are split by fgets
	Array a;
	initArray(&a, 1);
	/* opening file for reading */
	fp = fopen("inp.txt" , "r");
	if(fp == NULL) {
		printf("%s","error");
		return a;
	}
	while( fgets (str, 50000, fp)!=NULL ) {
		/* writing content to stdout */
		// printf("%s\n", str);
		char* token;
		char* rest = str;
		// strtok_r treats " , " as a SET of delimiter characters (space and
		// comma), so any run of spaces/commas separates two numbers; `rest`
		// doubles as the save pointer and walks forward through the line.
		while ((token = strtok_r(rest, " , ", &rest)))
			insertArray(&a, atoi(token));
	}
	fclose(fp);
	return a;
}
//Asserts for GPU errors
#define gpuErrorCheck(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a HIP error with its source location (used via the gpuErrorCheck
// macro); terminates the process with the error code unless `abort` is false.
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
	if (code == hipSuccess)
		return;
	printf("GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
	if (abort)
		exit(code);
}
// In-place tree reduction over global memory: each block sums its slice of
// d_in (bounded by `size`) into its first element, and thread 0 publishes it
// to d_out[blockIdx.x]. d_in is clobbered.
// Fix vs. previous version: ceil-halving handles any element/thread count —
// the old power-of-two halving silently dropped tail elements whenever the
// second reduction stage was launched with a non-power-of-two thread count,
// and the final read was unguarded for blocks entirely past `size`.
__global__ void global_reduce_kernel(int * d_out, int * d_in, int size)
{
	int tid  = threadIdx.x;
	int base = blockDim.x * blockIdx.x;
	// number of valid elements owned by this block; uniform across the block,
	// so every thread reaches the barrier below the same number of times
	unsigned int active = 0;
	if (base < size)
	{
		int remaining = size - base;
		active = (unsigned int)((remaining < (int)blockDim.x) ? remaining : (int)blockDim.x);
	}
	while (active > 1)
	{
		unsigned int half = (active + 1) / 2;   // ceil(active/2) keeps odd tails
		if ((unsigned int)tid < active - half)
		{
			d_in[base + tid] += d_in[base + tid + half];
		}
		__syncthreads(); // make sure all adds at one stage are done!
		active = half;
	}
	// only thread 0 writes result for this block back to global mem
	if (tid == 0 && base < size)
	{
		d_out[blockIdx.x] = d_in[base];
	}
}
// Block-level tree reduction in shared memory; requires blockDim.x*sizeof(int)
// dynamic shared memory (3rd launch argument). Each block writes its partial
// sum to d_out[blockIdx.x].
// Fixes vs. previous version: the shared-memory load is now bounds-checked and
// zero-padded (the old code read d_in past `size` whenever the grid was larger
// than the data), and ceil-halving makes the reduction correct for any
// blockDim.x, not just powers of two (the old halving dropped tail elements).
__global__ void shmem_reduce_kernel(int * d_out, const int * d_in, int size)
{
	// sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>>
	extern __shared__ int sdata[];
	int myId = threadIdx.x + blockDim.x * blockIdx.x;
	int tid = threadIdx.x;
	// load shared mem from global mem, zero-padding lanes past the end
	sdata[tid] = (myId < size) ? d_in[myId] : 0;
	__syncthreads(); // make sure entire block is loaded!
	// do reduction in shared mem (ceil-halving: works for any blockDim.x)
	unsigned int active = blockDim.x;
	while (active > 1)
	{
		unsigned int half = (active + 1) / 2;
		if ((unsigned int)tid < active - half)
		{
			sdata[tid] += sdata[tid + half];
		}
		__syncthreads(); // make sure all adds at one stage are done!
		active = half;
	}
	// only thread 0 writes result for this block back to global mem
	if (tid == 0)
	{
		d_out[blockIdx.x] = sdata[0];
	}
}
//kernel to create an intermediate array corresponding to given bins of array B
// Writes a 0/1 flag array: d_bins[i] = 1 iff d_binsin[i] lies in [x, y].
// Grid-stride loop, so any grid size covers all `size` elements.
// Fix vs. previous version: removed the __syncthreads() inside the loop —
// threads make different numbers of loop trips, so a barrier there is
// undefined behaviour, and no thread ever reads another thread's flag, so no
// synchronisation is needed at all.
__global__ void global_count_range_bins_kernel(int * d_bins, int * d_binsin, int size, int x, int y)
{
	int myId = threadIdx.x + blockDim.x * blockIdx.x;
	// stride is the total number of threads in the grid
	// Using stride increases the performance and benefits with scalability & thread reusage
	int stride = blockDim.x * gridDim.x;
	// assign flags in global memory
	for (; myId < size; myId += stride)
	{
		int v = d_binsin[myId];
		d_bins[myId] = (v >= x && v <= y) ? 1 : 0;
	}
}
//kernel to perform parallel prefix sum
//assumes only 1 block (1 block can be utilized since we have only 10 elements)
//kernel to perform parallel inclusive prefix sum (Hillis-Steele)
//assumes only 1 block with blockDim.x >= size, and blockDim.x*sizeof(int)
//dynamic shared memory (3rd launch argument).
// Fixes vs. previous version: each stage now reads its partner value before
// any thread writes (the old `sh_mem[tid] += sh_mem[tid - d]` raced the write
// of sh_mem[tid - d] in the same stage), barriers no longer sit inside the
// divergent `if (myId < size)` branch, and the load/store are bounds-checked.
__global__ void prefixsum(int *d_out, int * d_in, int size)
{
	extern __shared__ int sh_mem[];
	int tid = threadIdx.x;
	int myId = blockIdx.x * blockDim.x + threadIdx.x;
	sh_mem[tid] = (myId < size) ? d_in[myId] : 0;   // zero-pad idle lanes
	__syncthreads();
	for (int d = 1; d < blockDim.x; d *= 2)
	{
		// phase 1: every thread reads its partner value ...
		int addend = (tid >= d) ? sh_mem[tid - d] : 0;
		__syncthreads();
		// phase 2: ... then writes, once all reads of this stage are done
		sh_mem[tid] += addend;
		__syncthreads();
	}
	if (myId < size)
	{
		d_out[myId] = sh_mem[tid];
	}
}
//Function to call corresponding kernel based on memory usage
//Two-stage sum reduction of d_in[0..size-1] into *d_out, choosing the shared-
//or global-memory kernel. d_intermediate receives one partial sum per first-
//stage block; the global-memory path clobbers d_in.
// Assumes the first stage produces at most maxThreadsPerBlock partial sums
// (i.e. size <= 512*512), since stage two runs them in a single block.
// Fixes vs. previous version: integer ceil-division instead of float ceil(),
// launch/sync error checks on both paths (only the global path was checked),
// and stage two is told the partial-sum count rather than the full `size`.
void reduce(int * d_out, int * d_intermediate, int * d_in,
            int size, bool usesSharedMemory)
{
	const int maxThreadsPerBlock = 512;
	int threads = maxThreadsPerBlock;
	// handles non power of 2 arrays without float rounding error
	int blocks = (size + maxThreadsPerBlock - 1) / maxThreadsPerBlock;
	if (usesSharedMemory)
	{
		shmem_reduce_kernel<<<blocks, threads, threads * sizeof(int)>>>
			(d_intermediate, d_in, size);
	}
	else
	{
		global_reduce_kernel<<<blocks, threads>>>
			(d_intermediate, d_in, size);
	}
	gpuErrorCheck( hipPeekAtLastError() );
	gpuErrorCheck( hipDeviceSynchronize() );
	// now we're down to `blocks` partial sums, so reduce them with one block
	int partials = blocks;
	threads = partials; // launch one thread for each block in prev step
	blocks = 1;
	if (usesSharedMemory)
	{
		shmem_reduce_kernel<<<blocks, threads, threads * sizeof(int)>>>
			(d_out, d_intermediate, partials);
	}
	else
	{
		global_reduce_kernel<<<blocks, threads>>>
			(d_out, d_intermediate, partials);
	}
	gpuErrorCheck( hipPeekAtLastError() );
}
int main(int argc, char **argv)
{
FILE *q2a;
FILE *q2b;
FILE *q2c;
q2a = fopen("q2a.txt", "w");
q2b = fopen("q2b.txt", "w");
q2c = fopen("q2c.txt", "w");
int deviceCount;
hipGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(q2a, "error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
int dev = 0;
hipSetDevice(dev);
hipDeviceProp_t devProps;
if (hipGetDeviceProperties(&devProps, dev) == 0)
{
fprintf(q2a, "Using device %d:\n", dev);
fprintf(q2a, "%s; global mem: %dB; compute v%d.%d; clock: %d kHz\n",
devProps.name, (int)devProps.totalGlobalMem,
(int)devProps.major, (int)devProps.minor,
(int)devProps.clockRate);
}
// generate the input array on the host
Array A = initArrayA();
int * h_in = A.array;
const int ARRAY_SIZE = A.size;
const int ARRAY_BYTES = A.size * sizeof(int);
fprintf(q2a, "array size is %d\n", ARRAY_SIZE);
// declare GPU memory pointers
int * d_in, * d_intermediate, * d_out, * d_bins, *d_binsin, *prefix_out, *prefix_in;
// allocate GPU memory
hipMalloc((void **) &d_in, ARRAY_BYTES);
hipMalloc((void **) &d_binsin, ARRAY_BYTES);
hipMalloc((void **) &d_intermediate, ARRAY_BYTES); // overallocated
hipMalloc((void **) &d_bins, ARRAY_BYTES);
hipMalloc((void **) &d_out, sizeof(int));
// allocate memory for prefix sum, it has only 10 buckets
hipMalloc((void **) &prefix_out, 10*sizeof(int));
hipMalloc((void **) &prefix_in, 10*sizeof(int));
const int maxThreadsPerBlock = 512;
int threads = maxThreadsPerBlock;
// handles non power of 2 arrays
int blocks = ceil(float(ARRAY_SIZE) / float(maxThreadsPerBlock));
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float elapsedTime;
//Problem 2a - Using Global Memory to get counts(from Reduction)
fprintf(q2a,"Using Global Memory to get counts(from Reduction\n");
// copy back the bin counts from GPU
int b[10];
// transfer the input array to the GPU
gpuErrorCheck( hipMemcpy(d_binsin, h_in, ARRAY_BYTES, hipMemcpyHostToDevice));
//fprintf(q2a, "Running global count\n");
//Bin 1
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 0, 99);
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
gpuErrorCheck( hipPeekAtLastError() );
gpuErrorCheck( hipDeviceSynchronize() );
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 1 - average time elapsed using global memory: %f\n", elapsedTime);
hipMemcpy(&b[0], d_out, sizeof(int), hipMemcpyDeviceToHost);
//Bin 2
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 100, 199);
gpuErrorCheck( hipPeekAtLastError() );
gpuErrorCheck( hipDeviceSynchronize() );
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 2 - average time elapsed using global memory: %f\n", elapsedTime);
hipMemcpy(&b[1], d_out, sizeof(int), hipMemcpyDeviceToHost);
//Bin 3
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 200, 299);
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 3 - average time elapsed using global memory: %f\n", elapsedTime);
hipMemcpy(&b[2], d_out, sizeof(int), hipMemcpyDeviceToHost);
//Bin 4
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 300, 399);
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 4 - average time elapsed using global memory: %f\n", elapsedTime);
hipMemcpy(&b[3], d_out, sizeof(int), hipMemcpyDeviceToHost);
//Bin 5
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 400, 499);
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 5 - average time elapsed using global memory: %f\n", elapsedTime);
hipMemcpy(&b[4], d_out, sizeof(int), hipMemcpyDeviceToHost);
//Bin 6
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 500, 599);
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 6 - average time elapsed using global memory: %f\n", elapsedTime);
hipMemcpy(&b[5], d_out, sizeof(int), hipMemcpyDeviceToHost);
//Bin 7
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 600, 699);
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 7 - average time elapsed using global memory: %f\n", elapsedTime);
hipMemcpy(&b[6], d_out, sizeof(int), hipMemcpyDeviceToHost);
//Bin 8
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 700, 799);
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 8 - average time elapsed using global memory: %f\n", elapsedTime);
hipMemcpy(&b[7], d_out, sizeof(int), hipMemcpyDeviceToHost);
//Bin 9
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 800, 899);
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 9 - average time elapsed using global memory: %f\n", elapsedTime);
hipMemcpy(&b[8], d_out, sizeof(int), hipMemcpyDeviceToHost);
//Bin 10
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 900, 999);
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, false);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2a, "Bin 10 - average time elapsed using global memory: %f\n", elapsedTime);
hipMemcpy(&b[9], d_out, sizeof(int), hipMemcpyDeviceToHost);
for(int i = 0; i < 10; i++) {
fprintf(q2a, "Global Memory - count returned by device: %d\n", b[i]);
}
//Problem 2b - Using Shared Memory to get counts(from Reduction)
fprintf(q2b,"Using Shared Memory to get counts(from Reduction\n");
hipGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(q2b, "error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
dev = 0;
hipSetDevice(dev);
if (hipGetDeviceProperties(&devProps, dev) == 0)
{
fprintf(q2b, "Using device %d:\n", dev);
fprintf(q2b, "%s; global mem: %dB; compute v%d.%d; clock: %d kHz\n",
devProps.name, (int)devProps.totalGlobalMem,
(int)devProps.major, (int)devProps.minor,
(int)devProps.clockRate);
}
fprintf(q2b,"array size is %d\n", ARRAY_SIZE);
// copy back the bin counts from GPU
int s[10];
// transfer the input array to the GPU
hipMemcpy(d_binsin, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
//fprintf(q2b, "Running shared count\n");
//Bin 1
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 0, 99);
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 1 - average time elapsed using shared memory: %f\n", elapsedTime);
hipMemcpy(&s[0], d_out, sizeof(int), hipMemcpyDeviceToHost);
//Bin 2
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 100, 199);
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 2 - average time elapsed using shared memory: %f\n", elapsedTime);
hipMemcpy(&s[1], d_out, sizeof(int), hipMemcpyDeviceToHost);
//Bin 3
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 200, 299);
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 3 - average time elapsed using shared memory: %f\n", elapsedTime);
hipMemcpy(&s[2], d_out, sizeof(int), hipMemcpyDeviceToHost);
//Bin 4
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 300, 399);
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 4 - average time elapsed using shared memory: %f\n", elapsedTime);
hipMemcpy(&s[3], d_out, sizeof(int), hipMemcpyDeviceToHost);
//Bin 5
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 400, 499);
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 5 - average time elapsed using shared memory: %f\n", elapsedTime);
hipMemcpy(&s[4], d_out, sizeof(int), hipMemcpyDeviceToHost);
//Bin 6
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 500, 599);
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 6 - average time elapsed using shared memory: %f\n", elapsedTime);
hipMemcpy(&s[5], d_out, sizeof(int), hipMemcpyDeviceToHost);
//Bin 7
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 600, 699);
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 7 - average time elapsed using shared memory: %f\n", elapsedTime);
hipMemcpy(&s[6], d_out, sizeof(int), hipMemcpyDeviceToHost);
//Bin 8
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 700, 799);
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 8 - average time elapsed using shared memory: %f\n", elapsedTime);
hipMemcpy(&s[7], d_out, sizeof(int), hipMemcpyDeviceToHost);
//Bin 9
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 800, 899);
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 9 - average time elapsed using shared memory: %f\n", elapsedTime);
hipMemcpy(&s[8], d_out, sizeof(int), hipMemcpyDeviceToHost);
//Bin 10
global_count_range_bins_kernel<<<blocks, threads>>>
(d_bins, d_binsin, ARRAY_SIZE, 900, 999);
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_bins, ARRAY_SIZE, true);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(q2b, "Bin 10 - average time elapsed using shared memory: %f\n", elapsedTime);
hipMemcpy(&s[9], d_out, sizeof(int), hipMemcpyDeviceToHost);
for(int i = 0; i < 10; i++) {
fprintf(q2b, "Shared Memory - count returned by device: %d\n", s[i]);
}
//Problem 2c - Using Parallel Prefix Scan
fprintf(q2c,"Using Parallel Prefix Scan to generate C\n");
hipGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(q2c, "error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
dev = 0;
hipSetDevice(dev);
if (hipGetDeviceProperties(&devProps, dev) == 0)
{
fprintf(q2c, "Using device %d:\n", dev);
fprintf(q2c, "%s; global mem: %dB; compute v%d.%d; clock: %d kHz\n",
devProps.name, (int)devProps.totalGlobalMem,
(int)devProps.major, (int)devProps.minor,
(int)devProps.clockRate);
}
// transfer the input scan array to the GPU
hipMemcpy(prefix_in, b, 10 * sizeof(int), hipMemcpyHostToDevice);
hipEventRecord(start, 0);
prefixsum<<<1, 10, 10 * sizeof(int)>>>(prefix_out, prefix_in, 10);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
gpuErrorCheck( hipPeekAtLastError() );
gpuErrorCheck( hipDeviceSynchronize() );
// copy back the prefix sum from GPU
int c[10];
hipMemcpy(&c, prefix_out, 10*sizeof(int), hipMemcpyDeviceToHost);
fprintf(q2c, "Prefix Sum - average time elapsed: %f\n", elapsedTime);
for(int i = 0; i < 10; i++) {
fprintf(q2c, "Prefix Sum returned by device: %d\n", c[i]);
}
// free GPU memory allocation
hipFree(d_in);
hipFree(d_intermediate);
hipFree(d_out);
hipFree(d_bins);
hipFree(d_binsin);
hipFree(prefix_out);
hipFree(prefix_in);
return 0;
}
// Reference: https://github.com/manjaripokala/sum20-Parallel-algs/blob/master/cuda-examples/reduce.cu | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z20global_reduce_kernelPiS_i
.globl _Z20global_reduce_kernelPiS_i
.p2align 8
.type _Z20global_reduce_kernelPiS_i,@function
_Z20global_reduce_kernelPiS_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b64 s[6:7], s[0:1], 0x8
s_mov_b32 s4, s15
s_waitcnt lgkmcnt(0)
s_and_b32 s5, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s4, s5, v[0:1]
s_cmp_lt_u32 s5, 2
v_ashrrev_i32_e32 v2, 31, v1
s_cbranch_scc1 .LBB0_5
s_load_b32 s8, s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[3:4], 2, v[1:2]
v_mov_b32_e32 v6, 0
v_add_co_u32 v3, vcc_lo, s6, v3
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v4, vcc_lo, s7, v4, vcc_lo
s_waitcnt lgkmcnt(0)
v_cmp_gt_i32_e32 vcc_lo, s8, v1
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_3
.p2align 6
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s3
s_cmp_lt_u32 s5, 4
s_mov_b32 s5, s9
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
s_cbranch_scc1 .LBB0_5
.LBB0_3:
s_lshr_b32 s9, s5, 1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v5, s9, v1
v_cmp_gt_u32_e64 s2, s9, v0
v_cmp_gt_u32_e64 s3, s8, v5
s_delay_alu instid0(VALU_DEP_2)
s_and_b32 s2, vcc_lo, s2
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
s_and_b32 s2, s2, s3
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_2
v_lshlrev_b64 v[7:8], 2, v[5:6]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v7, s2, s6, v7
v_add_co_ci_u32_e64 v8, s2, s7, v8, s2
s_clause 0x1
global_load_b32 v5, v[7:8], off
global_load_b32 v7, v[3:4], off
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v5, v7, v5
global_store_b32 v[3:4], v5, off
s_branch .LBB0_2
.LBB0_5:
s_set_inst_prefetch_distance 0x2
s_mov_b32 s5, 0
s_mov_b32 s2, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB0_7
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_load_b64 s[0:1], s[0:1], 0x0
s_lshl_b64 s[2:3], s[4:5], 2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
global_load_b32 v0, v[0:1], off
v_mov_b32_e32 v1, 0
s_waitcnt lgkmcnt(0)
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
s_waitcnt vmcnt(0)
global_store_b32 v1, v0, s[0:1]
.LBB0_7:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z20global_reduce_kernelPiS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z20global_reduce_kernelPiS_i, .Lfunc_end0-_Z20global_reduce_kernelPiS_i
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z19shmem_reduce_kernelPiPKii
.globl _Z19shmem_reduce_kernelPiPKii
.p2align 8
.type _Z19shmem_reduce_kernelPiPKii,@function
_Z19shmem_reduce_kernelPiPKii:
s_clause 0x1
s_load_b32 s5, s[0:1], 0x24
s_load_b64 s[2:3], s[0:1], 0x8
s_mov_b32 s4, s15
s_waitcnt lgkmcnt(0)
s_and_b32 s5, s5, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s4, s5, v[0:1]
s_cmp_lt_u32 s5, 2
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
v_add_co_u32 v2, vcc_lo, s2, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo
global_load_b32 v3, v[2:3], off
v_lshl_add_u32 v2, v0, 2, 0
s_waitcnt vmcnt(0)
ds_store_b32 v2, v3
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_cbranch_scc1 .LBB1_5
s_load_b32 s6, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
v_cmp_gt_i32_e32 vcc_lo, s6, v1
s_set_inst_prefetch_distance 0x1
s_branch .LBB1_3
.p2align 6
.LBB1_2:
s_or_b32 exec_lo, exec_lo, s2
s_cmp_lt_u32 s5, 4
s_mov_b32 s5, s7
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_cbranch_scc1 .LBB1_5
.LBB1_3:
s_lshr_b32 s7, s5, 1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v3, s7, v1
v_cmp_gt_u32_e64 s2, s7, v0
v_cmp_gt_u32_e64 s3, s6, v3
s_delay_alu instid0(VALU_DEP_2)
s_and_b32 s2, vcc_lo, s2
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
s_and_b32 s3, s2, s3
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s2, s3
s_cbranch_execz .LBB1_2
v_add_nc_u32_e32 v3, s7, v0
s_delay_alu instid0(VALU_DEP_1)
v_lshl_add_u32 v3, v3, 2, 0
ds_load_b32 v3, v3
ds_load_b32 v4, v2
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v3, v4, v3
ds_store_b32 v2, v3
s_branch .LBB1_2
.LBB1_5:
s_set_inst_prefetch_distance 0x2
s_mov_b32 s2, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB1_7
v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0
s_load_b64 s[0:1], s[0:1], 0x0
s_mov_b32 s5, 0
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[2:3], s[4:5], 2
ds_load_b32 v0, v0
s_waitcnt lgkmcnt(0)
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
global_store_b32 v1, v0, s[0:1]
.LBB1_7:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z19shmem_reduce_kernelPiPKii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z19shmem_reduce_kernelPiPKii, .Lfunc_end1-_Z19shmem_reduce_kernelPiPKii
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z30global_count_range_bins_kernelPiS_iii
.globl _Z30global_count_range_bins_kernelPiS_iii
.p2align 8
.type _Z30global_count_range_bins_kernelPiS_iii,@function
_Z30global_count_range_bins_kernelPiS_iii:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x2c
s_load_b32 s12, s[0:1], 0x10
s_add_u32 s2, s0, 32
s_addc_u32 s3, s1, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s8, s4, 0xffff
s_mov_b32 s4, exec_lo
v_mad_u64_u32 v[1:2], null, s15, s8, v[0:1]
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s12, v1
s_cbranch_execz .LBB2_3
s_load_b32 s9, s[2:3], 0x0
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[2:3], s[0:1], 0x14
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_mul_i32 s8, s9, s8
s_ashr_i32 s9, s8, 31
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[10:11], s[8:9], 2
s_mov_b32 s9, 0
.p2align 6
.LBB2_2:
v_add_co_u32 v4, vcc_lo, s6, v2
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v3, vcc_lo
v_add_nc_u32_e32 v1, s8, v1
global_load_b32 v0, v[4:5], off
v_add_co_u32 v4, vcc_lo, s4, v2
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v3, vcc_lo
v_add_co_u32 v2, vcc_lo, v2, s10
v_add_co_ci_u32_e32 v3, vcc_lo, s11, v3, vcc_lo
v_cmp_le_i32_e32 vcc_lo, s12, v1
s_waitcnt vmcnt(0)
v_cmp_le_i32_e64 s0, s2, v0
v_cmp_ge_i32_e64 s1, s3, v0
s_delay_alu instid0(VALU_DEP_1)
s_and_b32 s0, s0, s1
s_or_b32 s9, vcc_lo, s9
v_cndmask_b32_e64 v0, 0, 1, s0
global_store_b32 v[4:5], v0, off
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
s_and_not1_b32 exec_lo, exec_lo, s9
s_cbranch_execnz .LBB2_2
.LBB2_3:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z30global_count_range_bins_kernelPiS_iii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z30global_count_range_bins_kernelPiS_iii, .Lfunc_end2-_Z30global_count_range_bins_kernelPiS_iii
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z9prefixsumPiS_i
.globl _Z9prefixsumPiS_i
.p2align 8
.type _Z9prefixsumPiS_i,@function
_Z9prefixsumPiS_i:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x10
s_load_b64 s[4:5], s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_cmp_gt_u32 s2, 1
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[3:4], 2, v[1:2]
v_add_co_u32 v3, vcc_lo, s4, v3
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo
v_cmp_gt_i32_e32 vcc_lo, s3, v1
s_cselect_b32 s4, -1, 0
s_mov_b32 s3, 1
global_load_b32 v4, v[3:4], off
v_lshl_add_u32 v3, v0, 2, 0
s_and_b32 s5, vcc_lo, s4
s_waitcnt vmcnt(0)
ds_store_b32 v3, v4
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_and_saveexec_b32 s4, s5
s_cbranch_execnz .LBB3_3
.LBB3_1:
s_or_b32 exec_lo, exec_lo, s4
s_load_b64 s[0:1], s[0:1], 0x0
ds_load_b32 v3, v3
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v3, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.p2align 6
.LBB3_2:
s_or_b32 exec_lo, exec_lo, s5
s_lshl_b32 s3, s3, 1
s_waitcnt lgkmcnt(0)
s_cmp_lt_u32 s3, s2
s_barrier
buffer_gl0_inv
s_cbranch_scc0 .LBB3_1
.LBB3_3:
s_mov_b32 s5, exec_lo
v_cmpx_le_u32_e64 s3, v0
s_cbranch_execz .LBB3_2
v_subrev_nc_u32_e32 v4, s3, v0
s_delay_alu instid0(VALU_DEP_1)
v_lshl_add_u32 v4, v4, 2, 0
ds_load_b32 v4, v4
ds_load_b32 v5, v3
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v4, v5, v4
ds_store_b32 v3, v4
s_branch .LBB3_2
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9prefixsumPiS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end3:
.size _Z9prefixsumPiS_i, .Lfunc_end3-_Z9prefixsumPiS_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z20global_reduce_kernelPiS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z20global_reduce_kernelPiS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
- .offset: 144
.size: 4
.value_kind: hidden_dynamic_lds_size
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z19shmem_reduce_kernelPiPKii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z19shmem_reduce_kernelPiPKii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z30global_count_range_bins_kernelPiS_iii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z30global_count_range_bins_kernelPiS_iii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
- .offset: 144
.size: 4
.value_kind: hidden_dynamic_lds_size
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9prefixsumPiS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9prefixsumPiS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z9prefixsumPiS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x001fc800078e0205 */
/*0060*/ IMAD.WIDE R2, R0, R3, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fcc00078e0203 */
/*0070*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff047624 */
/* 0x000fca00078e00ff */
/*0090*/ ISETP.GE.U32.AND P0, PT, R4, 0x2, PT ; /* 0x000000020400780c */
/* 0x000fc80003f06070 */
/*00a0*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x170], !P0 ; /* 0x00005c0000007a0c */
/* 0x000fe20004706670 */
/*00b0*/ STS [R5.X4], R2 ; /* 0x0000000205007388 */
/* 0x0041e80000004800 */
/*00c0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000ff00000010000 */
/*00d0*/ @P0 BRA 0x1a0 ; /* 0x000000c000000947 */
/* 0x000fea0003800000 */
/*00e0*/ HFMA2.MMA R2, -RZ, RZ, 0, 5.9604644775390625e-08 ; /* 0x00000001ff027435 */
/* 0x001fd400000001ff */
/*00f0*/ ISETP.GE.AND P0, PT, R5, R2, PT ; /* 0x000000020500720c */
/* 0x000fe20003f06270 */
/*0100*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */
/* 0x000fd80003800000 */
/*0110*/ @P0 IMAD.IADD R3, R5, 0x1, -R2 ; /* 0x0000000105030824 */
/* 0x000fe200078e0a02 */
/*0120*/ @P0 LDS R4, [R5.X4] ; /* 0x0000000005040984 */
/* 0x000fe20000004800 */
/*0130*/ SHF.L.U32 R2, R2, 0x1, RZ ; /* 0x0000000102027819 */
/* 0x000fc800000006ff */
/*0140*/ @P0 LDS R3, [R3.X4] ; /* 0x0000000003030984 */
/* 0x000e240000004800 */
/*0150*/ @P0 IMAD.IADD R4, R4, 0x1, R3 ; /* 0x0000000104040824 */
/* 0x001fca00078e0203 */
/*0160*/ @P0 STS [R5.X4], R4 ; /* 0x0000000405000388 */
/* 0x0001e80000004800 */
/*0170*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0180*/ ISETP.GE.U32.AND P0, PT, R2, c[0x0][0x0], PT ; /* 0x0000000002007a0c */
/* 0x000fda0003f06070 */
/*0190*/ @!P0 BRA 0xf0 ; /* 0xffffff5000008947 */
/* 0x001fea000383ffff */
/*01a0*/ LDS R5, [R5.X4] ; /* 0x0000000005057984 */
/* 0x001e220000004800 */
/*01b0*/ SHF.R.S32.HI R3, RZ, 0x1f, R0 ; /* 0x0000001fff037819 */
/* 0x000fe40000011400 */
/*01c0*/ LEA R2, P0, R0, c[0x0][0x160], 0x2 ; /* 0x0000580000027a11 */
/* 0x000fc800078010ff */
/*01d0*/ LEA.HI.X R3, R0, c[0x0][0x164], R3, 0x2, P0 ; /* 0x0000590000037a11 */
/* 0x000fca00000f1403 */
/*01e0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101904 */
/*01f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0200*/ BRA 0x200; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z30global_count_range_bins_kernelPiS_iii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0070*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x001fd400000001ff */
/*0080*/ IMAD.WIDE R2, R0, R5, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fcc00078e0205 */
/*0090*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1900 */
/*00a0*/ IMAD.MOV.U32 R9, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff097624 */
/* 0x000fe200078e00ff */
/*00b0*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */
/* 0x000fe20003800000 */
/*00c0*/ IMAD.WIDE R4, R0, R5, c[0x0][0x160] ; /* 0x0000580000047625 */
/* 0x000fc800078e0205 */
/*00d0*/ IMAD R0, R9, c[0x0][0xc], R0 ; /* 0x0000030009007a24 */
/* 0x000fe200078e0200 */
/*00e0*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x174], PT ; /* 0x00005d0002007a0c */
/* 0x004fc80003f06270 */
/*00f0*/ ISETP.LE.AND P0, PT, R2, c[0x0][0x178], P0 ; /* 0x00005e0002007a0c */
/* 0x000fc80000703270 */
/*0100*/ SEL R7, RZ, 0x1, !P0 ; /* 0x00000001ff077807 */
/* 0x000fe40004000000 */
/*0110*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fc60003f06270 */
/*0120*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x0001e8000c101904 */
/*0130*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0140*/ @!P0 BRA 0x70 ; /* 0xffffff2000008947 */
/* 0x000fea000383ffff */
/*0150*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0160*/ BRA 0x160; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z19shmem_reduce_kernelPiPKii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R0, R6, c[0x0][0x0], R7 ; /* 0x0000000006007a24 */
/* 0x001fc800078e0207 */
/*0060*/ IMAD.WIDE R2, R0, R3, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fcc00078e0203 */
/*0070*/ LDG.E R2, [R2.64] ; /* 0x0000000602027981 */
/* 0x000ea2000c1e1900 */
/*0080*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */
/* 0x000fe20000000800 */
/*0090*/ ISETP.NE.AND P1, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x000fe20003f25270 */
/*00a0*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */
/* 0x000fcc0008011604 */
/*00b0*/ ISETP.NE.AND P0, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */
/* 0x000fe2000bf05270 */
/*00c0*/ STS [R7.X4], R2 ; /* 0x0000000207007388 */
/* 0x0041e80000004800 */
/*00d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000ff00000010000 */
/*00e0*/ @!P0 BRA 0x1e0 ; /* 0x000000f000008947 */
/* 0x000fea0003800000 */
/*00f0*/ SHF.L.U32 R2, R7, 0x2, RZ ; /* 0x0000000207027819 */
/* 0x001fe200000006ff */
/*0100*/ IMAD.U32 R3, RZ, RZ, UR4 ; /* 0x00000004ff037e24 */
/* 0x000fca000f8e00ff */
/*0110*/ ISETP.GE.U32.AND P0, PT, R7, R3, PT ; /* 0x000000030700720c */
/* 0x000fe20003f06070 */
/*0120*/ IMAD.IADD R4, R0, 0x1, R3 ; /* 0x0000000100047824 */
/* 0x000fc600078e0203 */
/*0130*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x170], P0 ; /* 0x00005c0000007a0c */
/* 0x000fc80000706670 */
/*0140*/ ISETP.GE.U32.OR P0, PT, R4, c[0x0][0x170], P0 ; /* 0x00005c0004007a0c */
/* 0x000fda0000706470 */
/*0150*/ @!P0 LEA R4, R3, R2, 0x2 ; /* 0x0000000203048211 */
/* 0x000fe200078e10ff */
/*0160*/ @!P0 LDS R5, [R7.X4] ; /* 0x0000000007058984 */
/* 0x000fe20000004800 */
/*0170*/ SHF.R.U32.HI R3, RZ, 0x1, R3 ; /* 0x00000001ff037819 */
/* 0x000fc80000011603 */
/*0180*/ @!P0 LDS R4, [R4] ; /* 0x0000000004048984 */
/* 0x000e240000000800 */
/*0190*/ @!P0 IMAD.IADD R5, R5, 0x1, R4 ; /* 0x0000000105058824 */
/* 0x001fca00078e0204 */
/*01a0*/ @!P0 STS [R7.X4], R5 ; /* 0x0000000507008388 */
/* 0x0001e80000004800 */
/*01b0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*01c0*/ ISETP.NE.AND P0, PT, R3, RZ, PT ; /* 0x000000ff0300720c */
/* 0x000fda0003f05270 */
/*01d0*/ @P0 BRA 0x110 ; /* 0xffffff3000000947 */
/* 0x001fea000383ffff */
/*01e0*/ @P1 EXIT ; /* 0x000000000000194d */
/* 0x001fea0003800000 */
/*01f0*/ LDS R5, [RZ] ; /* 0x00000000ff057984 */
/* 0x000e220000000800 */
/*0200*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fd400000001ff */
/*0210*/ IMAD.WIDE.U32 R2, R6, R3, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x000fca00078e0003 */
/*0220*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101906 */
/*0230*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0240*/ BRA 0x240; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z20global_reduce_kernelPiS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R8, SR_CTAID.X ; /* 0x0000000000087919 */
/* 0x000e220000002500 */
/*0020*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */
/* 0x000fe20000000800 */
/*0030*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*0040*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */
/* 0x000fe20008011604 */
/*0050*/ S2R R9, SR_TID.X ; /* 0x0000000000097919 */
/* 0x000e220000002100 */
/*0060*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fc80000000a00 */
/*0070*/ ISETP.NE.AND P0, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */
/* 0x000fe2000bf05270 */
/*0080*/ IMAD R0, R8, c[0x0][0x0], R9 ; /* 0x0000000008007a24 */
/* 0x001fc800078e0209 */
/*0090*/ IMAD.WIDE R2, R0, R3, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fd000078e0203 */
/*00a0*/ @!P0 BRA 0x1c0 ; /* 0x0000011000008947 */
/* 0x000fea0003800000 */
/*00b0*/ ISETP.GE.U32.AND P0, PT, R9, UR4, PT ; /* 0x0000000409007c0c */
/* 0x000fe2000bf06070 */
/*00c0*/ BSSY B0, 0x180 ; /* 0x000000b000007945 */
/* 0x000fe20003800000 */
/*00d0*/ IADD3 R4, R0.reuse, UR4, RZ ; /* 0x0000000400047c10 */
/* 0x040fe4000fffe0ff */
/*00e0*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x170], P0 ; /* 0x00005c0000007a0c */
/* 0x000fc80000706670 */
/*00f0*/ ISETP.GE.U32.OR P0, PT, R4, c[0x0][0x170], P0 ; /* 0x00005c0004007a0c */
/* 0x000fda0000706470 */
/*0100*/ @P0 BRA 0x170 ; /* 0x0000006000000947 */
/* 0x001fea0003800000 */
/*0110*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0120*/ LDG.E R6, [R2.64] ; /* 0x0000000602067981 */
/* 0x000eb2000c1e1900 */
/*0130*/ IMAD.WIDE.U32 R4, R4, R5, c[0x0][0x168] ; /* 0x00005a0004047625 */
/* 0x000fcc00078e0005 */
/*0140*/ LDG.E R5, [R4.64] ; /* 0x0000000604057981 */
/* 0x000ea4000c1e1900 */
/*0150*/ IMAD.IADD R7, R6, 0x1, R5 ; /* 0x0000000106077824 */
/* 0x004fca00078e0205 */
/*0160*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x0001e4000c101906 */
/*0170*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0180*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */
/* 0x000fe20008011604 */
/*0190*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fea0000010000 */
/*01a0*/ ISETP.NE.AND P0, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */
/* 0x000fda000bf05270 */
/*01b0*/ @P0 BRA 0xb0 ; /* 0xfffffef000000947 */
/* 0x000fea000383ffff */
/*01c0*/ ISETP.NE.AND P0, PT, R9, RZ, PT ; /* 0x000000ff0900720c */
/* 0x000fda0003f05270 */
/*01d0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*01e0*/ LDG.E R3, [R2.64] ; /* 0x0000000602037981 */
/* 0x001ea2000c1e1900 */
/*01f0*/ MOV R5, 0x4 ; /* 0x0000000400057802 */
/* 0x000fca0000000f00 */
/*0200*/ IMAD.WIDE.U32 R4, R8, R5, c[0x0][0x160] ; /* 0x0000580008047625 */
/* 0x000fca00078e0005 */
/*0210*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */
/* 0x004fe2000c101906 */
/*0220*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0230*/ BRA 0x230; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z20global_reduce_kernelPiS_i
.globl _Z20global_reduce_kernelPiS_i
.p2align 8
.type _Z20global_reduce_kernelPiS_i,@function
_Z20global_reduce_kernelPiS_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b64 s[6:7], s[0:1], 0x8
s_mov_b32 s4, s15
s_waitcnt lgkmcnt(0)
s_and_b32 s5, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s4, s5, v[0:1]
s_cmp_lt_u32 s5, 2
v_ashrrev_i32_e32 v2, 31, v1
s_cbranch_scc1 .LBB0_5
s_load_b32 s8, s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[3:4], 2, v[1:2]
v_mov_b32_e32 v6, 0
v_add_co_u32 v3, vcc_lo, s6, v3
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v4, vcc_lo, s7, v4, vcc_lo
s_waitcnt lgkmcnt(0)
v_cmp_gt_i32_e32 vcc_lo, s8, v1
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_3
.p2align 6
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s3
s_cmp_lt_u32 s5, 4
s_mov_b32 s5, s9
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
s_cbranch_scc1 .LBB0_5
.LBB0_3:
s_lshr_b32 s9, s5, 1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v5, s9, v1
v_cmp_gt_u32_e64 s2, s9, v0
v_cmp_gt_u32_e64 s3, s8, v5
s_delay_alu instid0(VALU_DEP_2)
s_and_b32 s2, vcc_lo, s2
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
s_and_b32 s2, s2, s3
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_2
v_lshlrev_b64 v[7:8], 2, v[5:6]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v7, s2, s6, v7
v_add_co_ci_u32_e64 v8, s2, s7, v8, s2
s_clause 0x1
global_load_b32 v5, v[7:8], off
global_load_b32 v7, v[3:4], off
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v5, v7, v5
global_store_b32 v[3:4], v5, off
s_branch .LBB0_2
.LBB0_5:
s_set_inst_prefetch_distance 0x2
s_mov_b32 s5, 0
s_mov_b32 s2, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB0_7
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_load_b64 s[0:1], s[0:1], 0x0
s_lshl_b64 s[2:3], s[4:5], 2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
global_load_b32 v0, v[0:1], off
v_mov_b32_e32 v1, 0
s_waitcnt lgkmcnt(0)
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
s_waitcnt vmcnt(0)
global_store_b32 v1, v0, s[0:1]
.LBB0_7:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z20global_reduce_kernelPiS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z20global_reduce_kernelPiS_i, .Lfunc_end0-_Z20global_reduce_kernelPiS_i
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z19shmem_reduce_kernelPiPKii
.globl _Z19shmem_reduce_kernelPiPKii
.p2align 8
.type _Z19shmem_reduce_kernelPiPKii,@function
_Z19shmem_reduce_kernelPiPKii:
s_clause 0x1
s_load_b32 s5, s[0:1], 0x24
s_load_b64 s[2:3], s[0:1], 0x8
s_mov_b32 s4, s15
s_waitcnt lgkmcnt(0)
s_and_b32 s5, s5, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s4, s5, v[0:1]
s_cmp_lt_u32 s5, 2
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
v_add_co_u32 v2, vcc_lo, s2, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo
global_load_b32 v3, v[2:3], off
v_lshl_add_u32 v2, v0, 2, 0
s_waitcnt vmcnt(0)
ds_store_b32 v2, v3
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_cbranch_scc1 .LBB1_5
s_load_b32 s6, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
v_cmp_gt_i32_e32 vcc_lo, s6, v1
s_set_inst_prefetch_distance 0x1
s_branch .LBB1_3
.p2align 6
.LBB1_2:
s_or_b32 exec_lo, exec_lo, s2
s_cmp_lt_u32 s5, 4
s_mov_b32 s5, s7
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_cbranch_scc1 .LBB1_5
.LBB1_3:
s_lshr_b32 s7, s5, 1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v3, s7, v1
v_cmp_gt_u32_e64 s2, s7, v0
v_cmp_gt_u32_e64 s3, s6, v3
s_delay_alu instid0(VALU_DEP_2)
s_and_b32 s2, vcc_lo, s2
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
s_and_b32 s3, s2, s3
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s2, s3
s_cbranch_execz .LBB1_2
v_add_nc_u32_e32 v3, s7, v0
s_delay_alu instid0(VALU_DEP_1)
v_lshl_add_u32 v3, v3, 2, 0
ds_load_b32 v3, v3
ds_load_b32 v4, v2
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v3, v4, v3
ds_store_b32 v2, v3
s_branch .LBB1_2
.LBB1_5:
s_set_inst_prefetch_distance 0x2
s_mov_b32 s2, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB1_7
v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0
s_load_b64 s[0:1], s[0:1], 0x0
s_mov_b32 s5, 0
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[2:3], s[4:5], 2
ds_load_b32 v0, v0
s_waitcnt lgkmcnt(0)
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
global_store_b32 v1, v0, s[0:1]
.LBB1_7:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z19shmem_reduce_kernelPiPKii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z19shmem_reduce_kernelPiPKii, .Lfunc_end1-_Z19shmem_reduce_kernelPiPKii
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z30global_count_range_bins_kernelPiS_iii
.globl _Z30global_count_range_bins_kernelPiS_iii
.p2align 8
.type _Z30global_count_range_bins_kernelPiS_iii,@function
_Z30global_count_range_bins_kernelPiS_iii:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x2c
s_load_b32 s12, s[0:1], 0x10
s_add_u32 s2, s0, 32
s_addc_u32 s3, s1, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s8, s4, 0xffff
s_mov_b32 s4, exec_lo
v_mad_u64_u32 v[1:2], null, s15, s8, v[0:1]
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s12, v1
s_cbranch_execz .LBB2_3
s_load_b32 s9, s[2:3], 0x0
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[2:3], s[0:1], 0x14
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_mul_i32 s8, s9, s8
s_ashr_i32 s9, s8, 31
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[10:11], s[8:9], 2
s_mov_b32 s9, 0
.p2align 6
.LBB2_2:
v_add_co_u32 v4, vcc_lo, s6, v2
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v3, vcc_lo
v_add_nc_u32_e32 v1, s8, v1
global_load_b32 v0, v[4:5], off
v_add_co_u32 v4, vcc_lo, s4, v2
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v3, vcc_lo
v_add_co_u32 v2, vcc_lo, v2, s10
v_add_co_ci_u32_e32 v3, vcc_lo, s11, v3, vcc_lo
v_cmp_le_i32_e32 vcc_lo, s12, v1
s_waitcnt vmcnt(0)
v_cmp_le_i32_e64 s0, s2, v0
v_cmp_ge_i32_e64 s1, s3, v0
s_delay_alu instid0(VALU_DEP_1)
s_and_b32 s0, s0, s1
s_or_b32 s9, vcc_lo, s9
v_cndmask_b32_e64 v0, 0, 1, s0
global_store_b32 v[4:5], v0, off
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
s_and_not1_b32 exec_lo, exec_lo, s9
s_cbranch_execnz .LBB2_2
.LBB2_3:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z30global_count_range_bins_kernelPiS_iii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z30global_count_range_bins_kernelPiS_iii, .Lfunc_end2-_Z30global_count_range_bins_kernelPiS_iii
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z9prefixsumPiS_i
.globl _Z9prefixsumPiS_i
.p2align 8
.type _Z9prefixsumPiS_i,@function
_Z9prefixsumPiS_i:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x10
s_load_b64 s[4:5], s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_cmp_gt_u32 s2, 1
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[3:4], 2, v[1:2]
v_add_co_u32 v3, vcc_lo, s4, v3
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo
v_cmp_gt_i32_e32 vcc_lo, s3, v1
s_cselect_b32 s4, -1, 0
s_mov_b32 s3, 1
global_load_b32 v4, v[3:4], off
v_lshl_add_u32 v3, v0, 2, 0
s_and_b32 s5, vcc_lo, s4
s_waitcnt vmcnt(0)
ds_store_b32 v3, v4
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_and_saveexec_b32 s4, s5
s_cbranch_execnz .LBB3_3
.LBB3_1:
s_or_b32 exec_lo, exec_lo, s4
s_load_b64 s[0:1], s[0:1], 0x0
ds_load_b32 v3, v3
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v3, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.p2align 6
.LBB3_2:
s_or_b32 exec_lo, exec_lo, s5
s_lshl_b32 s3, s3, 1
s_waitcnt lgkmcnt(0)
s_cmp_lt_u32 s3, s2
s_barrier
buffer_gl0_inv
s_cbranch_scc0 .LBB3_1
.LBB3_3:
s_mov_b32 s5, exec_lo
v_cmpx_le_u32_e64 s3, v0
s_cbranch_execz .LBB3_2
v_subrev_nc_u32_e32 v4, s3, v0
s_delay_alu instid0(VALU_DEP_1)
v_lshl_add_u32 v4, v4, 2, 0
ds_load_b32 v4, v4
ds_load_b32 v5, v3
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v4, v5, v4
ds_store_b32 v3, v4
s_branch .LBB3_2
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9prefixsumPiS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end3:
.size _Z9prefixsumPiS_i, .Lfunc_end3-_Z9prefixsumPiS_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z20global_reduce_kernelPiS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z20global_reduce_kernelPiS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
- .offset: 144
.size: 4
.value_kind: hidden_dynamic_lds_size
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z19shmem_reduce_kernelPiPKii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z19shmem_reduce_kernelPiPKii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z30global_count_range_bins_kernelPiS_iii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z30global_count_range_bins_kernelPiS_iii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
- .offset: 144
.size: 4
.value_kind: hidden_dynamic_lds_size
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9prefixsumPiS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9prefixsumPiS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
int main(void){
printf("hello world!!\n");
return 0;
} | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
int main(void){
printf("hello world!!\n");
return 0;
} | .file "tmpxft_0019a26d_00000000-6_hello_cuda_world.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "hello world!!\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %eax
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
int main(void){
printf("hello world!!\n");
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
int main(void){
printf("hello world!!\n");
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
int main(void){
printf("hello world!!\n");
return 0;
} | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
int main(void){
printf("hello world!!\n");
return 0;
} | .text
.file "hello_cuda_world.hip"
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rax
.cfi_def_cfa_offset 16
movl $.Lstr, %edi
callq puts@PLT
xorl %eax, %eax
popq %rcx
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "hello world!!"
.size .Lstr, 14
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0019a26d_00000000-6_hello_cuda_world.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "hello world!!\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %eax
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "hello_cuda_world.hip"
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rax
.cfi_def_cfa_offset 16
movl $.Lstr, %edi
callq puts@PLT
xorl %eax, %eax
popq %rcx
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "hello world!!"
.size .Lstr, 14
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <iostream>
#include <cmath>
#include <cassert>
void checkError (cudaError_t err, int line) {
if (err == cudaSuccess) return;
std::cout << "Error code " << err << " : " << cudaGetErrorString(err) << " " << " on line " << line << ", aborting.\n";
assert(false);
}
#define CUDACALL(x) checkError(x, __LINE__)
__global__ void dev_calculate_Gaussians (double* data, double mean, double sigma) {
// EXERCISE: Write this function so that each thread updates one
// index of the array data. The output value should be the Gaussian
// probability of the input value, given mean and sigma.
// (Optionally add a separate array for the output so that the input data
// are not overwritten.)
}
__global__ void dev_reduce_vector (double* data, double* result) {
// EXERCISE: Write this function so it takes the sum of
// the values in data and puts them into result.
// NB: You should assume that the size of data is smaller
// than one block - you need not worry about synchronising
// across blocks.
}
int main (int argc, char** argv) {
int sizeOfVector = atoi(argv[1]);
// EXERCISE: Check that the sizeOfVector variable
// is small enough that the GPU is able to launch
// that many threads in a single block.
double mean = 5;
double sigma = 3;
// Generate a host-side vector and fill it with random numbers.
double* host_data = new double[sizeOfVector];
for (int i = 0; i < sizeOfVector; ++i) {
host_data[i] = (rand() % 11) - 5;
}
// Host-side numbers to check against device-side ones.
double* host_probs = new double[sizeOfVector];
double host_sum = 0;
for (int i = 0; i < sizeOfVector; ++i) {
host_probs[i] = exp(-0.5 * pow((host_data[i] - mean) / sigma, 2));
host_probs[i] /= (sigma * sqrt(2*M_PI));
host_sum += host_probs[i];
}
double* dev_data = 0;
// EXERCISE: Create a device-side array with sizeOfVector elements and copy the host data into it.
// EXERCISE: Launch a one-block kernel which will run the method dev_calculate_Gaussians
// on each element of dev_data.
// EXERCISE: Copy back the results of the calculation into host_data.
// Check for reasonableness
double tolerance = 1e-6;
for (int i = 0; i < sizeOfVector; ++i) {
if (fabs(host_data[i] - host_probs[i]) <= tolerance) continue;
std::cout << "Problem with entry " << i << ": "
<< host_probs[i] << " " << host_data[i] << " "
<< (host_probs[i] - host_data[i])
<< std::endl;
}
std::cout << "Sum from CPU: " << host_sum << std::endl;
double* device_sum = 0;
// EXERCISE: Allocate a single double on the device, putting its address into device_sum.
// EXERCISE: Launch a kernel to sum the elements of dev_data and put the result in device_sum.
// EXERCISE: Copy the result back into host_sum.
std::cout << "Sum from GPU: " << host_sum << std::endl;
return 0;
} | code for sm_80
Function : _Z17dev_reduce_vectorPdS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z23dev_calculate_GaussiansPddd
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <iostream>
#include <cmath>
#include <cassert>
void checkError (cudaError_t err, int line) {
if (err == cudaSuccess) return;
std::cout << "Error code " << err << " : " << cudaGetErrorString(err) << " " << " on line " << line << ", aborting.\n";
assert(false);
}
#define CUDACALL(x) checkError(x, __LINE__)
__global__ void dev_calculate_Gaussians (double* data, double mean, double sigma) {
// EXERCISE: Write this function so that each thread updates one
// index of the array data. The output value should be the Gaussian
// probability of the input value, given mean and sigma.
// (Optionally add a separate array for the output so that the input data
// are not overwritten.)
}
__global__ void dev_reduce_vector (double* data, double* result) {
// EXERCISE: Write this function so it takes the sum of
// the values in data and puts them into result.
// NB: You should assume that the size of data is smaller
// than one block - you need not worry about synchronising
// across blocks.
}
int main (int argc, char** argv) {
int sizeOfVector = atoi(argv[1]);
// EXERCISE: Check that the sizeOfVector variable
// is small enough that the GPU is able to launch
// that many threads in a single block.
double mean = 5;
double sigma = 3;
// Generate a host-side vector and fill it with random numbers.
double* host_data = new double[sizeOfVector];
for (int i = 0; i < sizeOfVector; ++i) {
host_data[i] = (rand() % 11) - 5;
}
// Host-side numbers to check against device-side ones.
double* host_probs = new double[sizeOfVector];
double host_sum = 0;
for (int i = 0; i < sizeOfVector; ++i) {
host_probs[i] = exp(-0.5 * pow((host_data[i] - mean) / sigma, 2));
host_probs[i] /= (sigma * sqrt(2*M_PI));
host_sum += host_probs[i];
}
double* dev_data = 0;
// EXERCISE: Create a device-side array with sizeOfVector elements and copy the host data into it.
// EXERCISE: Launch a one-block kernel which will run the method dev_calculate_Gaussians
// on each element of dev_data.
// EXERCISE: Copy back the results of the calculation into host_data.
// Check for reasonableness
double tolerance = 1e-6;
for (int i = 0; i < sizeOfVector; ++i) {
if (fabs(host_data[i] - host_probs[i]) <= tolerance) continue;
std::cout << "Problem with entry " << i << ": "
<< host_probs[i] << " " << host_data[i] << " "
<< (host_probs[i] - host_data[i])
<< std::endl;
}
std::cout << "Sum from CPU: " << host_sum << std::endl;
double* device_sum = 0;
// EXERCISE: Allocate a single double on the device, putting its address into device_sum.
// EXERCISE: Launch a kernel to sum the elements of dev_data and put the result in device_sum.
// EXERCISE: Copy the result back into host_sum.
std::cout << "Sum from GPU: " << host_sum << std::endl;
return 0;
} | .file "tmpxft_00044175_00000000-6_exercise1.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3673:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3673:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Error code "
.LC1:
.string " : "
.LC2:
.string " "
.LC3:
.string " on line "
.LC4:
.string ", aborting.\n"
.text
.globl _Z10checkError9cudaErrori
.type _Z10checkError9cudaErrori, @function
_Z10checkError9cudaErrori:
.LFB3669:
.cfi_startproc
endbr64
testl %edi, %edi
jne .L11
ret
.L11:
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movl %edi, %ebx
movl %esi, %r12d
movl $11, %edx
leaq .LC0(%rip), %rsi
leaq _ZSt4cout(%rip), %rbp
movq %rbp, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl %ebx, %esi
movq %rbp, %rdi
call _ZNSolsEi@PLT
movq %rax, %rbp
movl $3, %edx
leaq .LC1(%rip), %rsi
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl %ebx, %edi
call cudaGetErrorString@PLT
movq %rax, %rbx
testq %rax, %rax
je .L12
movq %rax, %rdi
call strlen@PLT
movq %rax, %rdx
movq %rbx, %rsi
movq %rbp, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
.L6:
movl $1, %edx
leaq .LC2(%rip), %rsi
movq %rbp, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl $9, %edx
leaq .LC3(%rip), %rsi
movq %rbp, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl %r12d, %esi
movq %rbp, %rdi
call _ZNSolsEi@PLT
movq %rax, %rdi
movl $12, %edx
leaq .LC4(%rip), %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
popq %rbx
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L12:
.cfi_restore_state
movq 0(%rbp), %rax
movq %rbp, %rdi
addq -24(%rax), %rdi
movl 32(%rdi), %esi
orl $1, %esi
call _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate@PLT
jmp .L6
.cfi_endproc
.LFE3669:
.size _Z10checkError9cudaErrori, .-_Z10checkError9cudaErrori
.section .rodata.str1.1
.LC12:
.string "Problem with entry "
.LC13:
.string ": "
.LC14:
.string "Sum from CPU: "
.LC15:
.string "Sum from GPU: "
.text
.globl main
.type main, @function
main:
.LFB3670:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movq 8(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %rbp
movslq %eax, %r12
movq %r12, %rax
shrq $60, %rax
jne .L14
salq $3, %r12
movq %r12, %rdi
call _Znam@PLT
movq %rax, %r13
testl %ebp, %ebp
jle .L28
movq %rax, %rbx
leal -1(%rbp), %r15d
leaq 8(%rax,%r15,8), %rbp
.L17:
call rand@PLT
movslq %eax, %rdx
imulq $780903145, %rdx, %rdx
sarq $33, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
leal (%rdx,%rdx,4), %ecx
leal (%rdx,%rcx,2), %edx
subl %edx, %eax
subl $5, %eax
pxor %xmm0, %xmm0
cvtsi2sdl %eax, %xmm0
movsd %xmm0, (%rbx)
addq $8, %rbx
cmpq %rbx, %rbp
jne .L17
movq %r12, %rdi
call _Znam@PLT
movq %rax, %r14
movl $0, %ebx
movq $0x000000000, 8(%rsp)
.L18:
movsd 0(%r13,%rbx,8), %xmm0
subsd .LC6(%rip), %xmm0
divsd .LC7(%rip), %xmm0
mulsd %xmm0, %xmm0
mulsd .LC8(%rip), %xmm0
call exp@PLT
divsd .LC9(%rip), %xmm0
movsd %xmm0, (%r14,%rbx,8)
addsd 8(%rsp), %xmm0
movsd %xmm0, 8(%rsp)
movq %rbx, %rax
addq $1, %rbx
cmpq %r15, %rax
jne .L18
movl $0, %ebx
jmp .L23
.L28:
movq $0x000000000, 8(%rsp)
.L16:
leaq .LC14(%rip), %rsi
leaq _ZSt4cout(%rip), %rbx
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movsd 8(%rsp), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC15(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movsd 8(%rsp), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $0, %eax
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L14:
.cfi_restore_state
call __cxa_throw_bad_array_new_length@PLT
.L29:
call _ZSt16__throw_bad_castv@PLT
.L21:
movq %r12, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%r12), %rax
movl $10, %esi
movq %r12, %rdi
call *48(%rax)
movl %eax, %esi
.L22:
movsbl %sil, %esi
movq %rbp, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
.L19:
leaq 1(%rbx), %rax
cmpq %r15, %rbx
je .L16
movq %rax, %rbx
.L23:
movsd 0(%r13,%rbx,8), %xmm1
movsd (%r14,%rbx,8), %xmm2
movsd %xmm1, 16(%rsp)
movsd %xmm2, 24(%rsp)
subsd %xmm2, %xmm1
movapd %xmm1, %xmm0
andpd .LC10(%rip), %xmm0
movsd .LC11(%rip), %xmm4
comisd %xmm0, %xmm4
jnb .L19
movl $19, %edx
leaq .LC12(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl %ebx, %esi
leaq _ZSt4cout(%rip), %rdi
call _ZNSolsEi@PLT
movq %rax, %rbp
movl $2, %edx
leaq .LC13(%rip), %rsi
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movsd 24(%rsp), %xmm0
movq %rbp, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rbp
movl $1, %edx
leaq .LC2(%rip), %r12
movq %r12, %rsi
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movsd 16(%rsp), %xmm0
movq %rbp, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rbp
movl $1, %edx
movq %r12, %rsi
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movsd 24(%rsp), %xmm0
subsd 16(%rsp), %xmm0
movq %rbp, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rbp
movq (%rax), %rax
movq -24(%rax), %rax
movq 240(%rbp,%rax), %r12
testq %r12, %r12
je .L29
cmpb $0, 56(%r12)
je .L21
movzbl 67(%r12), %esi
jmp .L22
.cfi_endproc
.LFE3670:
.size main, .-main
.globl _Z45__device_stub__Z23dev_calculate_GaussiansPdddPddd
.type _Z45__device_stub__Z23dev_calculate_GaussiansPdddPddd, @function
_Z45__device_stub__Z23dev_calculate_GaussiansPdddPddd:
.LFB3695:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movsd %xmm0, 16(%rsp)
movsd %xmm1, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L34
.L30:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L35
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L34:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z23dev_calculate_GaussiansPddd(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L30
.L35:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3695:
.size _Z45__device_stub__Z23dev_calculate_GaussiansPdddPddd, .-_Z45__device_stub__Z23dev_calculate_GaussiansPdddPddd
.globl _Z23dev_calculate_GaussiansPddd
.type _Z23dev_calculate_GaussiansPddd, @function
_Z23dev_calculate_GaussiansPddd:
.LFB3696:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z45__device_stub__Z23dev_calculate_GaussiansPdddPddd
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3696:
.size _Z23dev_calculate_GaussiansPddd, .-_Z23dev_calculate_GaussiansPddd
.globl _Z39__device_stub__Z17dev_reduce_vectorPdS_PdS_
.type _Z39__device_stub__Z17dev_reduce_vectorPdS_PdS_, @function
_Z39__device_stub__Z17dev_reduce_vectorPdS_PdS_:
.LFB3697:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L42
.L38:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L43
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L42:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z17dev_reduce_vectorPdS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L38
.L43:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3697:
.size _Z39__device_stub__Z17dev_reduce_vectorPdS_PdS_, .-_Z39__device_stub__Z17dev_reduce_vectorPdS_PdS_
.globl _Z17dev_reduce_vectorPdS_
.type _Z17dev_reduce_vectorPdS_, @function
_Z17dev_reduce_vectorPdS_:
.LFB3698:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z39__device_stub__Z17dev_reduce_vectorPdS_PdS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3698:
.size _Z17dev_reduce_vectorPdS_, .-_Z17dev_reduce_vectorPdS_
.section .rodata.str1.1
.LC16:
.string "_Z17dev_reduce_vectorPdS_"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC17:
.string "_Z23dev_calculate_GaussiansPddd"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3700:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC16(%rip), %rdx
movq %rdx, %rcx
leaq _Z17dev_reduce_vectorPdS_(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC17(%rip), %rdx
movq %rdx, %rcx
leaq _Z23dev_calculate_GaussiansPddd(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3700:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC6:
.long 0
.long 1075052544
.align 8
.LC7:
.long 0
.long 1074266112
.align 8
.LC8:
.long 0
.long -1075838976
.align 8
.LC9:
.long -1343145336
.long 1075713116
.section .rodata.cst16,"aM",@progbits,16
.align 16
.LC10:
.long -1
.long 2147483647
.long 0
.long 0
.section .rodata.cst8
.align 8
.LC11:
.long -1598689907
.long 1051772663
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <iostream>
#include <cmath>
#include <cassert>
void checkError (cudaError_t err, int line) {
if (err == cudaSuccess) return;
std::cout << "Error code " << err << " : " << cudaGetErrorString(err) << " " << " on line " << line << ", aborting.\n";
assert(false);
}
#define CUDACALL(x) checkError(x, __LINE__)
__global__ void dev_calculate_Gaussians (double* data, double mean, double sigma) {
// EXERCISE: Write this function so that each thread updates one
// index of the array data. The output value should be the Gaussian
// probability of the input value, given mean and sigma.
// (Optionally add a separate array for the output so that the input data
// are not overwritten.)
}
__global__ void dev_reduce_vector (double* data, double* result) {
// EXERCISE: Write this function so it takes the sum of
// the values in data and puts them into result.
// NB: You should assume that the size of data is smaller
// than one block - you need not worry about synchronising
// across blocks.
}
int main (int argc, char** argv) {
int sizeOfVector = atoi(argv[1]);
// EXERCISE: Check that the sizeOfVector variable
// is small enough that the GPU is able to launch
// that many threads in a single block.
double mean = 5;
double sigma = 3;
// Generate a host-side vector and fill it with random numbers.
double* host_data = new double[sizeOfVector];
for (int i = 0; i < sizeOfVector; ++i) {
host_data[i] = (rand() % 11) - 5;
}
// Host-side numbers to check against device-side ones.
double* host_probs = new double[sizeOfVector];
double host_sum = 0;
for (int i = 0; i < sizeOfVector; ++i) {
host_probs[i] = exp(-0.5 * pow((host_data[i] - mean) / sigma, 2));
host_probs[i] /= (sigma * sqrt(2*M_PI));
host_sum += host_probs[i];
}
double* dev_data = 0;
// EXERCISE: Create a device-side array with sizeOfVector elements and copy the host data into it.
// EXERCISE: Launch a one-block kernel which will run the method dev_calculate_Gaussians
// on each element of dev_data.
// EXERCISE: Copy back the results of the calculation into host_data.
// Check for reasonableness
double tolerance = 1e-6;
for (int i = 0; i < sizeOfVector; ++i) {
if (fabs(host_data[i] - host_probs[i]) <= tolerance) continue;
std::cout << "Problem with entry " << i << ": "
<< host_probs[i] << " " << host_data[i] << " "
<< (host_probs[i] - host_data[i])
<< std::endl;
}
std::cout << "Sum from CPU: " << host_sum << std::endl;
double* device_sum = 0;
// EXERCISE: Allocate a single double on the device, putting its address into device_sum.
// EXERCISE: Launch a kernel to sum the elements of dev_data and put the result in device_sum.
// EXERCISE: Copy the result back into host_sum.
std::cout << "Sum from GPU: " << host_sum << std::endl;
return 0;
} | #include <hip/hip_runtime.h>
#include <iostream>
#include <cmath>
#include <cassert>
void checkError (hipError_t err, int line) {
if (err == hipSuccess) return;
std::cout << "Error code " << err << " : " << hipGetErrorString(err) << " " << " on line " << line << ", aborting.\n";
assert(false);
}
#define CUDACALL(x) checkError(x, __LINE__)
__global__ void dev_calculate_Gaussians (double* data, double mean, double sigma) {
// EXERCISE: Write this function so that each thread updates one
// index of the array data. The output value should be the Gaussian
// probability of the input value, given mean and sigma.
// (Optionally add a separate array for the output so that the input data
// are not overwritten.)
}
__global__ void dev_reduce_vector (double* data, double* result) {
// EXERCISE: Write this function so it takes the sum of
// the values in data and puts them into result.
// NB: You should assume that the size of data is smaller
// than one block - you need not worry about synchronising
// across blocks.
}
int main (int argc, char** argv) {
int sizeOfVector = atoi(argv[1]);
// EXERCISE: Check that the sizeOfVector variable
// is small enough that the GPU is able to launch
// that many threads in a single block.
double mean = 5;
double sigma = 3;
// Generate a host-side vector and fill it with random numbers.
double* host_data = new double[sizeOfVector];
for (int i = 0; i < sizeOfVector; ++i) {
host_data[i] = (rand() % 11) - 5;
}
// Host-side numbers to check against device-side ones.
double* host_probs = new double[sizeOfVector];
double host_sum = 0;
for (int i = 0; i < sizeOfVector; ++i) {
host_probs[i] = exp(-0.5 * pow((host_data[i] - mean) / sigma, 2));
host_probs[i] /= (sigma * sqrt(2*M_PI));
host_sum += host_probs[i];
}
double* dev_data = 0;
// EXERCISE: Create a device-side array with sizeOfVector elements and copy the host data into it.
// EXERCISE: Launch a one-block kernel which will run the method dev_calculate_Gaussians
// on each element of dev_data.
// EXERCISE: Copy back the results of the calculation into host_data.
// Check for reasonableness
double tolerance = 1e-6;
for (int i = 0; i < sizeOfVector; ++i) {
if (fabs(host_data[i] - host_probs[i]) <= tolerance) continue;
std::cout << "Problem with entry " << i << ": "
<< host_probs[i] << " " << host_data[i] << " "
<< (host_probs[i] - host_data[i])
<< std::endl;
}
std::cout << "Sum from CPU: " << host_sum << std::endl;
double* device_sum = 0;
// EXERCISE: Allocate a single double on the device, putting its address into device_sum.
// EXERCISE: Launch a kernel to sum the elements of dev_data and put the result in device_sum.
// EXERCISE: Copy the result back into host_sum.
std::cout << "Sum from GPU: " << host_sum << std::endl;
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <iostream>
#include <cmath>
#include <cassert>
void checkError (hipError_t err, int line) {
if (err == hipSuccess) return;
std::cout << "Error code " << err << " : " << hipGetErrorString(err) << " " << " on line " << line << ", aborting.\n";
assert(false);
}
#define CUDACALL(x) checkError(x, __LINE__)
__global__ void dev_calculate_Gaussians (double* data, double mean, double sigma) {
// EXERCISE: Write this function so that each thread updates one
// index of the array data. The output value should be the Gaussian
// probability of the input value, given mean and sigma.
// (Optionally add a separate array for the output so that the input data
// are not overwritten.)
}
__global__ void dev_reduce_vector (double* data, double* result) {
// EXERCISE: Write this function so it takes the sum of
// the values in data and puts them into result.
// NB: You should assume that the size of data is smaller
// than one block - you need not worry about synchronising
// across blocks.
}
int main (int argc, char** argv) {
int sizeOfVector = atoi(argv[1]);
// EXERCISE: Check that the sizeOfVector variable
// is small enough that the GPU is able to launch
// that many threads in a single block.
double mean = 5;
double sigma = 3;
// Generate a host-side vector and fill it with random numbers.
double* host_data = new double[sizeOfVector];
for (int i = 0; i < sizeOfVector; ++i) {
host_data[i] = (rand() % 11) - 5;
}
// Host-side numbers to check against device-side ones.
double* host_probs = new double[sizeOfVector];
double host_sum = 0;
for (int i = 0; i < sizeOfVector; ++i) {
host_probs[i] = exp(-0.5 * pow((host_data[i] - mean) / sigma, 2));
host_probs[i] /= (sigma * sqrt(2*M_PI));
host_sum += host_probs[i];
}
double* dev_data = 0;
// EXERCISE: Create a device-side array with sizeOfVector elements and copy the host data into it.
// EXERCISE: Launch a one-block kernel which will run the method dev_calculate_Gaussians
// on each element of dev_data.
// EXERCISE: Copy back the results of the calculation into host_data.
// Check for reasonableness
double tolerance = 1e-6;
for (int i = 0; i < sizeOfVector; ++i) {
if (fabs(host_data[i] - host_probs[i]) <= tolerance) continue;
std::cout << "Problem with entry " << i << ": "
<< host_probs[i] << " " << host_data[i] << " "
<< (host_probs[i] - host_data[i])
<< std::endl;
}
std::cout << "Sum from CPU: " << host_sum << std::endl;
double* device_sum = 0;
// EXERCISE: Allocate a single double on the device, putting its address into device_sum.
// EXERCISE: Launch a kernel to sum the elements of dev_data and put the result in device_sum.
// EXERCISE: Copy the result back into host_sum.
std::cout << "Sum from GPU: " << host_sum << std::endl;
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z23dev_calculate_GaussiansPddd
.globl _Z23dev_calculate_GaussiansPddd
.p2align 8
.type _Z23dev_calculate_GaussiansPddd,@function
_Z23dev_calculate_GaussiansPddd:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z23dev_calculate_GaussiansPddd
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z23dev_calculate_GaussiansPddd, .Lfunc_end0-_Z23dev_calculate_GaussiansPddd
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z17dev_reduce_vectorPdS_
.globl _Z17dev_reduce_vectorPdS_
.p2align 8
.type _Z17dev_reduce_vectorPdS_,@function
_Z17dev_reduce_vectorPdS_:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z17dev_reduce_vectorPdS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 16
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z17dev_reduce_vectorPdS_, .Lfunc_end1-_Z17dev_reduce_vectorPdS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 8
.value_kind: by_value
- .offset: 16
.size: 8
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z23dev_calculate_GaussiansPddd
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z23dev_calculate_GaussiansPddd.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 16
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z17dev_reduce_vectorPdS_
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z17dev_reduce_vectorPdS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <iostream>
#include <cmath>
#include <cassert>
void checkError (hipError_t err, int line) {
if (err == hipSuccess) return;
std::cout << "Error code " << err << " : " << hipGetErrorString(err) << " " << " on line " << line << ", aborting.\n";
assert(false);
}
#define CUDACALL(x) checkError(x, __LINE__)
__global__ void dev_calculate_Gaussians (double* data, double mean, double sigma) {
// EXERCISE: Write this function so that each thread updates one
// index of the array data. The output value should be the Gaussian
// probability of the input value, given mean and sigma.
// (Optionally add a separate array for the output so that the input data
// are not overwritten.)
}
__global__ void dev_reduce_vector (double* data, double* result) {
// EXERCISE: Write this function so it takes the sum of
// the values in data and puts them into result.
// NB: You should assume that the size of data is smaller
// than one block - you need not worry about synchronising
// across blocks.
}
int main (int argc, char** argv) {
int sizeOfVector = atoi(argv[1]);
// EXERCISE: Check that the sizeOfVector variable
// is small enough that the GPU is able to launch
// that many threads in a single block.
double mean = 5;
double sigma = 3;
// Generate a host-side vector and fill it with random numbers.
double* host_data = new double[sizeOfVector];
for (int i = 0; i < sizeOfVector; ++i) {
host_data[i] = (rand() % 11) - 5;
}
// Host-side numbers to check against device-side ones.
double* host_probs = new double[sizeOfVector];
double host_sum = 0;
for (int i = 0; i < sizeOfVector; ++i) {
host_probs[i] = exp(-0.5 * pow((host_data[i] - mean) / sigma, 2));
host_probs[i] /= (sigma * sqrt(2*M_PI));
host_sum += host_probs[i];
}
double* dev_data = 0;
// EXERCISE: Create a device-side array with sizeOfVector elements and copy the host data into it.
// EXERCISE: Launch a one-block kernel which will run the method dev_calculate_Gaussians
// on each element of dev_data.
// EXERCISE: Copy back the results of the calculation into host_data.
// Check for reasonableness
double tolerance = 1e-6;
for (int i = 0; i < sizeOfVector; ++i) {
if (fabs(host_data[i] - host_probs[i]) <= tolerance) continue;
std::cout << "Problem with entry " << i << ": "
<< host_probs[i] << " " << host_data[i] << " "
<< (host_probs[i] - host_data[i])
<< std::endl;
}
std::cout << "Sum from CPU: " << host_sum << std::endl;
double* device_sum = 0;
// EXERCISE: Allocate a single double on the device, putting its address into device_sum.
// EXERCISE: Launch a kernel to sum the elements of dev_data and put the result in device_sum.
// EXERCISE: Copy the result back into host_sum.
std::cout << "Sum from GPU: " << host_sum << std::endl;
return 0;
} | .text
.file "exercise1.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z10checkError10hipError_ti # -- Begin function _Z10checkError10hipError_ti
.p2align 4, 0x90
.type _Z10checkError10hipError_ti,@function
_Z10checkError10hipError_ti: # @_Z10checkError10hipError_ti
.cfi_startproc
# %bb.0:
testl %edi, %edi
je .LBB0_5
# %bb.1:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
pushq %rax
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %esi, %ebx
movl %edi, %ebp
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $11, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl %ebp, %esi
callq _ZNSolsEi
movq %rax, %r14
movl $.L.str.1, %esi
movl $3, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl %ebp, %edi
callq hipGetErrorString
testq %rax, %rax
je .LBB0_2
# %bb.3:
movq %rax, %rdi
movq %rax, %r15
callq strlen
movq %r14, %rdi
movq %r15, %rsi
movq %rax, %rdx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
jmp .LBB0_4
.LBB0_5:
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %rbp
.cfi_restore %r14
.cfi_restore %r15
retq
.LBB0_2:
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -40
.cfi_offset %rbp, -16
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq (%r14), %rax
movq -24(%rax), %rax
movq %r14, %rdi
addq %rax, %rdi
movl 32(%r14,%rax), %esi
orl $1, %esi
callq _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate
.LBB0_4: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit
movl $.L.str.2, %esi
movl $1, %edx
movq %r14, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $.L.str.3, %esi
movl $9, %edx
movq %r14, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq %r14, %rdi
movl %ebx, %esi
callq _ZNSolsEi
movl $.L.str.4, %esi
movl $12, %edx
movq %rax, %rdi
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
jmp _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l # TAILCALL
.Lfunc_end0:
.size _Z10checkError10hipError_ti, .Lfunc_end0-_Z10checkError10hipError_ti
.cfi_endproc
# -- End function
.globl _Z38__device_stub__dev_calculate_GaussiansPddd # -- Begin function _Z38__device_stub__dev_calculate_GaussiansPddd
.p2align 4, 0x90
.type _Z38__device_stub__dev_calculate_GaussiansPddd,@function
_Z38__device_stub__dev_calculate_GaussiansPddd: # @_Z38__device_stub__dev_calculate_GaussiansPddd
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movsd %xmm0, 64(%rsp)
movsd %xmm1, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z23dev_calculate_GaussiansPddd, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end1:
.size _Z38__device_stub__dev_calculate_GaussiansPddd, .Lfunc_end1-_Z38__device_stub__dev_calculate_GaussiansPddd
.cfi_endproc
# -- End function
.globl _Z32__device_stub__dev_reduce_vectorPdS_ # -- Begin function _Z32__device_stub__dev_reduce_vectorPdS_
.p2align 4, 0x90
.type _Z32__device_stub__dev_reduce_vectorPdS_,@function
_Z32__device_stub__dev_reduce_vectorPdS_: # @_Z32__device_stub__dev_reduce_vectorPdS_
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z17dev_reduce_vectorPdS_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end2:
.size _Z32__device_stub__dev_reduce_vectorPdS_, .Lfunc_end2-_Z32__device_stub__dev_reduce_vectorPdS_
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI3_0:
.quad 0xc014000000000000 # double -5
.LCPI3_1:
.quad 0x4008000000000000 # double 3
.LCPI3_2:
.quad 0xbfe0000000000000 # double -0.5
.LCPI3_3:
.quad 0x401e145caff13a88 # double 7.5198848238930011
.LCPI3_5:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0
.LCPI3_4:
.quad 0x7fffffffffffffff # double NaN
.quad 0x7fffffffffffffff # double NaN
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $24, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq 8(%rsi), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r15
movslq %r15d, %r12
leaq (,%r12,8), %rax
testl %r12d, %r12d
movq $-1, %r14
cmovnsq %rax, %r14
movq %r14, %rdi
callq _Znam
movq %rax, %rbx
testl %r12d, %r12d
jle .LBB3_3
# %bb.1: # %.lr.ph.preheader
movl %r15d, %r12d
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB3_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $780903145, %rax, %rcx # imm = 0x2E8BA2E9
movq %rcx, %rdx
shrq $63, %rdx
sarq $33, %rcx
addl %edx, %ecx
leal (%rcx,%rcx,4), %edx
leal (%rcx,%rdx,2), %ecx
negl %ecx
addl %ecx, %eax
addl $-5, %eax
xorps %xmm0, %xmm0
cvtsi2sd %eax, %xmm0
movsd %xmm0, (%rbx,%r13,8)
incq %r13
cmpq %r13, %r12
jne .LBB3_2
.LBB3_3: # %._crit_edge
movq %r14, %rdi
callq _Znam
movq %rax, %r14
testl %r15d, %r15d
jle .LBB3_4
# %bb.11: # %.lr.ph59.preheader
movl %r15d, %r12d
xorpd %xmm0, %xmm0
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB3_12: # %.lr.ph59
# =>This Inner Loop Header: Depth=1
movsd %xmm0, (%rsp) # 8-byte Spill
movsd (%rbx,%r13,8), %xmm0 # xmm0 = mem[0],zero
addsd .LCPI3_0(%rip), %xmm0
divsd .LCPI3_1(%rip), %xmm0
mulsd %xmm0, %xmm0
mulsd .LCPI3_2(%rip), %xmm0
callq exp
divsd .LCPI3_3(%rip), %xmm0
movsd %xmm0, (%r14,%r13,8)
movsd (%rsp), %xmm1 # 8-byte Reload
# xmm1 = mem[0],zero
addsd %xmm0, %xmm1
movsd %xmm1, (%rsp) # 8-byte Spill
movsd (%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
incq %r13
cmpq %r13, %r12
jne .LBB3_12
jmp .LBB3_5
.LBB3_4:
xorpd %xmm0, %xmm0
.LBB3_5: # %.preheader
movsd %xmm0, (%rsp) # 8-byte Spill
testl %r15d, %r15d
jle .LBB3_13
# %bb.6: # %.lr.ph61.preheader
movl %r15d, %r13d
xorl %r15d, %r15d
movapd .LCPI3_4(%rip), %xmm1 # xmm1 = [NaN,NaN]
movsd .LCPI3_5(%rip), %xmm2 # xmm2 = mem[0],zero
jmp .LBB3_7
.LBB3_22: # in Loop: Header=BB3_7 Depth=1
movq %r12, %rdi
movq %rax, %rbp
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r12), %rax
movq %r12, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %rbp, %rax
.LBB3_23: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit54
# in Loop: Header=BB3_7 Depth=1
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movapd .LCPI3_4(%rip), %xmm1 # xmm1 = [NaN,NaN]
movsd .LCPI3_5(%rip), %xmm2 # xmm2 = mem[0],zero
.LBB3_24: # in Loop: Header=BB3_7 Depth=1
incq %r15
cmpq %r15, %r13
je .LBB3_13
.LBB3_7: # %.lr.ph61
# =>This Inner Loop Header: Depth=1
movsd (%rbx,%r15,8), %xmm4 # xmm4 = mem[0],zero
movsd (%r14,%r15,8), %xmm3 # xmm3 = mem[0],zero
movapd %xmm4, %xmm0
subsd %xmm3, %xmm0
andpd %xmm1, %xmm0
ucomisd %xmm0, %xmm2
jae .LBB3_24
# %bb.8: # in Loop: Header=BB3_7 Depth=1
movl $_ZSt4cout, %edi
movl $.L.str.5, %esi
movl $19, %edx
movsd %xmm3, 8(%rsp) # 8-byte Spill
movsd %xmm4, 16(%rsp) # 8-byte Spill
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl %r15d, %esi
callq _ZNSolsEi
movq %rax, %r12
movl $.L.str.6, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq %r12, %rdi
movsd 8(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
callq _ZNSo9_M_insertIdEERSoT_
movq %rax, %r12
movl $.L.str.2, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq %r12, %rdi
movsd 16(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
callq _ZNSo9_M_insertIdEERSoT_
movq %rax, %r12
movl $.L.str.2, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movsd 8(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
subsd 16(%rsp), %xmm0 # 8-byte Folded Reload
movq %r12, %rdi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r12
testq %r12, %r12
je .LBB3_25
# %bb.9: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i51
# in Loop: Header=BB3_7 Depth=1
cmpb $0, 56(%r12)
je .LBB3_22
# %bb.10: # in Loop: Header=BB3_7 Depth=1
movzbl 67(%r12), %ecx
jmp .LBB3_23
.LBB3_13: # %._crit_edge62
movl $_ZSt4cout, %edi
movl $.L.str.7, %esi
movl $14, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movsd (%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB3_25
# %bb.14: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%rbx)
je .LBB3_16
# %bb.15:
movzbl 67(%rbx), %ecx
jmp .LBB3_17
.LBB3_16:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB3_17: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $_ZSt4cout, %edi
movl $.L.str.8, %esi
movl $14, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movsd (%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB3_25
# %bb.18: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i46
cmpb $0, 56(%rbx)
je .LBB3_20
# %bb.19:
movzbl 67(%rbx), %ecx
jmp .LBB3_21
.LBB3_20:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB3_21: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit49
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
xorl %eax, %eax
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB3_25:
.cfi_def_cfa_offset 80
callq _ZSt16__throw_bad_castv
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z23dev_calculate_GaussiansPddd, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z17dev_reduce_vectorPdS_, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Error code "
.size .L.str, 12
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz " : "
.size .L.str.1, 4
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz " "
.size .L.str.2, 2
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz " on line "
.size .L.str.3, 10
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz ", aborting.\n"
.size .L.str.4, 13
.type _Z23dev_calculate_GaussiansPddd,@object # @_Z23dev_calculate_GaussiansPddd
.section .rodata,"a",@progbits
.globl _Z23dev_calculate_GaussiansPddd
.p2align 3, 0x0
_Z23dev_calculate_GaussiansPddd:
.quad _Z38__device_stub__dev_calculate_GaussiansPddd
.size _Z23dev_calculate_GaussiansPddd, 8
.type _Z17dev_reduce_vectorPdS_,@object # @_Z17dev_reduce_vectorPdS_
.globl _Z17dev_reduce_vectorPdS_
.p2align 3, 0x0
_Z17dev_reduce_vectorPdS_:
.quad _Z32__device_stub__dev_reduce_vectorPdS_
.size _Z17dev_reduce_vectorPdS_, 8
.type .L.str.5,@object # @.str.5
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.5:
.asciz "Problem with entry "
.size .L.str.5, 20
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz ": "
.size .L.str.6, 3
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "Sum from CPU: "
.size .L.str.7, 15
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "Sum from GPU: "
.size .L.str.8, 15
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z23dev_calculate_GaussiansPddd"
.size .L__unnamed_1, 32
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z17dev_reduce_vectorPdS_"
.size .L__unnamed_2, 26
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z38__device_stub__dev_calculate_GaussiansPddd
.addrsig_sym _Z32__device_stub__dev_reduce_vectorPdS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _ZSt4cout
.addrsig_sym _Z23dev_calculate_GaussiansPddd
.addrsig_sym _Z17dev_reduce_vectorPdS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z17dev_reduce_vectorPdS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z23dev_calculate_GaussiansPddd
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z23dev_calculate_GaussiansPddd
.globl _Z23dev_calculate_GaussiansPddd
.p2align 8
.type _Z23dev_calculate_GaussiansPddd,@function
_Z23dev_calculate_GaussiansPddd:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z23dev_calculate_GaussiansPddd
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z23dev_calculate_GaussiansPddd, .Lfunc_end0-_Z23dev_calculate_GaussiansPddd
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z17dev_reduce_vectorPdS_
.globl _Z17dev_reduce_vectorPdS_
.p2align 8
.type _Z17dev_reduce_vectorPdS_,@function
_Z17dev_reduce_vectorPdS_:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z17dev_reduce_vectorPdS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 16
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z17dev_reduce_vectorPdS_, .Lfunc_end1-_Z17dev_reduce_vectorPdS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 8
.value_kind: by_value
- .offset: 16
.size: 8
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z23dev_calculate_GaussiansPddd
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z23dev_calculate_GaussiansPddd.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 16
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z17dev_reduce_vectorPdS_
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z17dev_reduce_vectorPdS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00044175_00000000-6_exercise1.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3673:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3673:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Error code "
.LC1:
.string " : "
.LC2:
.string " "
.LC3:
.string " on line "
.LC4:
.string ", aborting.\n"
.text
.globl _Z10checkError9cudaErrori
.type _Z10checkError9cudaErrori, @function
_Z10checkError9cudaErrori:
.LFB3669:
.cfi_startproc
endbr64
testl %edi, %edi
jne .L11
ret
.L11:
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movl %edi, %ebx
movl %esi, %r12d
movl $11, %edx
leaq .LC0(%rip), %rsi
leaq _ZSt4cout(%rip), %rbp
movq %rbp, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl %ebx, %esi
movq %rbp, %rdi
call _ZNSolsEi@PLT
movq %rax, %rbp
movl $3, %edx
leaq .LC1(%rip), %rsi
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl %ebx, %edi
call cudaGetErrorString@PLT
movq %rax, %rbx
testq %rax, %rax
je .L12
movq %rax, %rdi
call strlen@PLT
movq %rax, %rdx
movq %rbx, %rsi
movq %rbp, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
.L6:
movl $1, %edx
leaq .LC2(%rip), %rsi
movq %rbp, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl $9, %edx
leaq .LC3(%rip), %rsi
movq %rbp, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl %r12d, %esi
movq %rbp, %rdi
call _ZNSolsEi@PLT
movq %rax, %rdi
movl $12, %edx
leaq .LC4(%rip), %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
popq %rbx
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L12:
.cfi_restore_state
movq 0(%rbp), %rax
movq %rbp, %rdi
addq -24(%rax), %rdi
movl 32(%rdi), %esi
orl $1, %esi
call _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate@PLT
jmp .L6
.cfi_endproc
.LFE3669:
.size _Z10checkError9cudaErrori, .-_Z10checkError9cudaErrori
.section .rodata.str1.1
.LC12:
.string "Problem with entry "
.LC13:
.string ": "
.LC14:
.string "Sum from CPU: "
.LC15:
.string "Sum from GPU: "
.text
.globl main
.type main, @function
main:
.LFB3670:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movq 8(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %rbp
movslq %eax, %r12
movq %r12, %rax
shrq $60, %rax
jne .L14
salq $3, %r12
movq %r12, %rdi
call _Znam@PLT
movq %rax, %r13
testl %ebp, %ebp
jle .L28
movq %rax, %rbx
leal -1(%rbp), %r15d
leaq 8(%rax,%r15,8), %rbp
.L17:
call rand@PLT
movslq %eax, %rdx
imulq $780903145, %rdx, %rdx
sarq $33, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
leal (%rdx,%rdx,4), %ecx
leal (%rdx,%rcx,2), %edx
subl %edx, %eax
subl $5, %eax
pxor %xmm0, %xmm0
cvtsi2sdl %eax, %xmm0
movsd %xmm0, (%rbx)
addq $8, %rbx
cmpq %rbx, %rbp
jne .L17
movq %r12, %rdi
call _Znam@PLT
movq %rax, %r14
movl $0, %ebx
movq $0x000000000, 8(%rsp)
.L18:
movsd 0(%r13,%rbx,8), %xmm0
subsd .LC6(%rip), %xmm0
divsd .LC7(%rip), %xmm0
mulsd %xmm0, %xmm0
mulsd .LC8(%rip), %xmm0
call exp@PLT
divsd .LC9(%rip), %xmm0
movsd %xmm0, (%r14,%rbx,8)
addsd 8(%rsp), %xmm0
movsd %xmm0, 8(%rsp)
movq %rbx, %rax
addq $1, %rbx
cmpq %r15, %rax
jne .L18
movl $0, %ebx
jmp .L23
.L28:
movq $0x000000000, 8(%rsp)
.L16:
leaq .LC14(%rip), %rsi
leaq _ZSt4cout(%rip), %rbx
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movsd 8(%rsp), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC15(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movsd 8(%rsp), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $0, %eax
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L14:
.cfi_restore_state
call __cxa_throw_bad_array_new_length@PLT
.L29:
call _ZSt16__throw_bad_castv@PLT
.L21:
movq %r12, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%r12), %rax
movl $10, %esi
movq %r12, %rdi
call *48(%rax)
movl %eax, %esi
.L22:
movsbl %sil, %esi
movq %rbp, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
.L19:
leaq 1(%rbx), %rax
cmpq %r15, %rbx
je .L16
movq %rax, %rbx
.L23:
movsd 0(%r13,%rbx,8), %xmm1
movsd (%r14,%rbx,8), %xmm2
movsd %xmm1, 16(%rsp)
movsd %xmm2, 24(%rsp)
subsd %xmm2, %xmm1
movapd %xmm1, %xmm0
andpd .LC10(%rip), %xmm0
movsd .LC11(%rip), %xmm4
comisd %xmm0, %xmm4
jnb .L19
movl $19, %edx
leaq .LC12(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl %ebx, %esi
leaq _ZSt4cout(%rip), %rdi
call _ZNSolsEi@PLT
movq %rax, %rbp
movl $2, %edx
leaq .LC13(%rip), %rsi
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movsd 24(%rsp), %xmm0
movq %rbp, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rbp
movl $1, %edx
leaq .LC2(%rip), %r12
movq %r12, %rsi
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movsd 16(%rsp), %xmm0
movq %rbp, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rbp
movl $1, %edx
movq %r12, %rsi
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movsd 24(%rsp), %xmm0
subsd 16(%rsp), %xmm0
movq %rbp, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rbp
movq (%rax), %rax
movq -24(%rax), %rax
movq 240(%rbp,%rax), %r12
testq %r12, %r12
je .L29
cmpb $0, 56(%r12)
je .L21
movzbl 67(%r12), %esi
jmp .L22
.cfi_endproc
.LFE3670:
.size main, .-main
.globl _Z45__device_stub__Z23dev_calculate_GaussiansPdddPddd
.type _Z45__device_stub__Z23dev_calculate_GaussiansPdddPddd, @function
_Z45__device_stub__Z23dev_calculate_GaussiansPdddPddd:
.LFB3695:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movsd %xmm0, 16(%rsp)
movsd %xmm1, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L34
.L30:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L35
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L34:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z23dev_calculate_GaussiansPddd(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L30
.L35:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3695:
.size _Z45__device_stub__Z23dev_calculate_GaussiansPdddPddd, .-_Z45__device_stub__Z23dev_calculate_GaussiansPdddPddd
.globl _Z23dev_calculate_GaussiansPddd
.type _Z23dev_calculate_GaussiansPddd, @function
_Z23dev_calculate_GaussiansPddd:
.LFB3696:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z45__device_stub__Z23dev_calculate_GaussiansPdddPddd
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3696:
.size _Z23dev_calculate_GaussiansPddd, .-_Z23dev_calculate_GaussiansPddd
.globl _Z39__device_stub__Z17dev_reduce_vectorPdS_PdS_
.type _Z39__device_stub__Z17dev_reduce_vectorPdS_PdS_, @function
_Z39__device_stub__Z17dev_reduce_vectorPdS_PdS_:
.LFB3697:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L42
.L38:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L43
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L42:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z17dev_reduce_vectorPdS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L38
.L43:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3697:
.size _Z39__device_stub__Z17dev_reduce_vectorPdS_PdS_, .-_Z39__device_stub__Z17dev_reduce_vectorPdS_PdS_
.globl _Z17dev_reduce_vectorPdS_
.type _Z17dev_reduce_vectorPdS_, @function
_Z17dev_reduce_vectorPdS_:
.LFB3698:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z39__device_stub__Z17dev_reduce_vectorPdS_PdS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3698:
.size _Z17dev_reduce_vectorPdS_, .-_Z17dev_reduce_vectorPdS_
.section .rodata.str1.1
.LC16:
.string "_Z17dev_reduce_vectorPdS_"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC17:
.string "_Z23dev_calculate_GaussiansPddd"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3700:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC16(%rip), %rdx
movq %rdx, %rcx
leaq _Z17dev_reduce_vectorPdS_(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC17(%rip), %rdx
movq %rdx, %rcx
leaq _Z23dev_calculate_GaussiansPddd(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3700:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC6:
.long 0
.long 1075052544
.align 8
.LC7:
.long 0
.long 1074266112
.align 8
.LC8:
.long 0
.long -1075838976
.align 8
.LC9:
.long -1343145336
.long 1075713116
.section .rodata.cst16,"aM",@progbits,16
.align 16
.LC10:
.long -1
.long 2147483647
.long 0
.long 0
.section .rodata.cst8
.align 8
.LC11:
.long -1598689907
.long 1051772663
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "exercise1.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z10checkError10hipError_ti # -- Begin function _Z10checkError10hipError_ti
.p2align 4, 0x90
.type _Z10checkError10hipError_ti,@function
_Z10checkError10hipError_ti: # @_Z10checkError10hipError_ti
.cfi_startproc
# %bb.0:
testl %edi, %edi
je .LBB0_5
# %bb.1:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
pushq %rax
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %esi, %ebx
movl %edi, %ebp
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $11, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl %ebp, %esi
callq _ZNSolsEi
movq %rax, %r14
movl $.L.str.1, %esi
movl $3, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl %ebp, %edi
callq hipGetErrorString
testq %rax, %rax
je .LBB0_2
# %bb.3:
movq %rax, %rdi
movq %rax, %r15
callq strlen
movq %r14, %rdi
movq %r15, %rsi
movq %rax, %rdx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
jmp .LBB0_4
.LBB0_5:
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %rbp
.cfi_restore %r14
.cfi_restore %r15
retq
.LBB0_2:
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -40
.cfi_offset %rbp, -16
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq (%r14), %rax
movq -24(%rax), %rax
movq %r14, %rdi
addq %rax, %rdi
movl 32(%r14,%rax), %esi
orl $1, %esi
callq _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate
.LBB0_4: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit
movl $.L.str.2, %esi
movl $1, %edx
movq %r14, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $.L.str.3, %esi
movl $9, %edx
movq %r14, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq %r14, %rdi
movl %ebx, %esi
callq _ZNSolsEi
movl $.L.str.4, %esi
movl $12, %edx
movq %rax, %rdi
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
jmp _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l # TAILCALL
.Lfunc_end0:
.size _Z10checkError10hipError_ti, .Lfunc_end0-_Z10checkError10hipError_ti
.cfi_endproc
# -- End function
.globl _Z38__device_stub__dev_calculate_GaussiansPddd # -- Begin function _Z38__device_stub__dev_calculate_GaussiansPddd
.p2align 4, 0x90
.type _Z38__device_stub__dev_calculate_GaussiansPddd,@function
_Z38__device_stub__dev_calculate_GaussiansPddd: # @_Z38__device_stub__dev_calculate_GaussiansPddd
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movsd %xmm0, 64(%rsp)
movsd %xmm1, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z23dev_calculate_GaussiansPddd, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end1:
.size _Z38__device_stub__dev_calculate_GaussiansPddd, .Lfunc_end1-_Z38__device_stub__dev_calculate_GaussiansPddd
.cfi_endproc
# -- End function
.globl _Z32__device_stub__dev_reduce_vectorPdS_ # -- Begin function _Z32__device_stub__dev_reduce_vectorPdS_
.p2align 4, 0x90
.type _Z32__device_stub__dev_reduce_vectorPdS_,@function
_Z32__device_stub__dev_reduce_vectorPdS_: # @_Z32__device_stub__dev_reduce_vectorPdS_
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z17dev_reduce_vectorPdS_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end2:
.size _Z32__device_stub__dev_reduce_vectorPdS_, .Lfunc_end2-_Z32__device_stub__dev_reduce_vectorPdS_
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI3_0:
.quad 0xc014000000000000 # double -5
.LCPI3_1:
.quad 0x4008000000000000 # double 3
.LCPI3_2:
.quad 0xbfe0000000000000 # double -0.5
.LCPI3_3:
.quad 0x401e145caff13a88 # double 7.5198848238930011
.LCPI3_5:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0
.LCPI3_4:
.quad 0x7fffffffffffffff # double NaN
.quad 0x7fffffffffffffff # double NaN
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $24, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq 8(%rsi), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r15
movslq %r15d, %r12
leaq (,%r12,8), %rax
testl %r12d, %r12d
movq $-1, %r14
cmovnsq %rax, %r14
movq %r14, %rdi
callq _Znam
movq %rax, %rbx
testl %r12d, %r12d
jle .LBB3_3
# %bb.1: # %.lr.ph.preheader
movl %r15d, %r12d
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB3_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $780903145, %rax, %rcx # imm = 0x2E8BA2E9
movq %rcx, %rdx
shrq $63, %rdx
sarq $33, %rcx
addl %edx, %ecx
leal (%rcx,%rcx,4), %edx
leal (%rcx,%rdx,2), %ecx
negl %ecx
addl %ecx, %eax
addl $-5, %eax
xorps %xmm0, %xmm0
cvtsi2sd %eax, %xmm0
movsd %xmm0, (%rbx,%r13,8)
incq %r13
cmpq %r13, %r12
jne .LBB3_2
.LBB3_3: # %._crit_edge
movq %r14, %rdi
callq _Znam
movq %rax, %r14
testl %r15d, %r15d
jle .LBB3_4
# %bb.11: # %.lr.ph59.preheader
movl %r15d, %r12d
xorpd %xmm0, %xmm0
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB3_12: # %.lr.ph59
# =>This Inner Loop Header: Depth=1
movsd %xmm0, (%rsp) # 8-byte Spill
movsd (%rbx,%r13,8), %xmm0 # xmm0 = mem[0],zero
addsd .LCPI3_0(%rip), %xmm0
divsd .LCPI3_1(%rip), %xmm0
mulsd %xmm0, %xmm0
mulsd .LCPI3_2(%rip), %xmm0
callq exp
divsd .LCPI3_3(%rip), %xmm0
movsd %xmm0, (%r14,%r13,8)
movsd (%rsp), %xmm1 # 8-byte Reload
# xmm1 = mem[0],zero
addsd %xmm0, %xmm1
movsd %xmm1, (%rsp) # 8-byte Spill
movsd (%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
incq %r13
cmpq %r13, %r12
jne .LBB3_12
jmp .LBB3_5
.LBB3_4:
xorpd %xmm0, %xmm0
.LBB3_5: # %.preheader
movsd %xmm0, (%rsp) # 8-byte Spill
testl %r15d, %r15d
jle .LBB3_13
# %bb.6: # %.lr.ph61.preheader
movl %r15d, %r13d
xorl %r15d, %r15d
movapd .LCPI3_4(%rip), %xmm1 # xmm1 = [NaN,NaN]
movsd .LCPI3_5(%rip), %xmm2 # xmm2 = mem[0],zero
jmp .LBB3_7
.LBB3_22: # in Loop: Header=BB3_7 Depth=1
movq %r12, %rdi
movq %rax, %rbp
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r12), %rax
movq %r12, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %rbp, %rax
.LBB3_23: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit54
# in Loop: Header=BB3_7 Depth=1
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movapd .LCPI3_4(%rip), %xmm1 # xmm1 = [NaN,NaN]
movsd .LCPI3_5(%rip), %xmm2 # xmm2 = mem[0],zero
.LBB3_24: # in Loop: Header=BB3_7 Depth=1
incq %r15
cmpq %r15, %r13
je .LBB3_13
.LBB3_7: # %.lr.ph61
# =>This Inner Loop Header: Depth=1
movsd (%rbx,%r15,8), %xmm4 # xmm4 = mem[0],zero
movsd (%r14,%r15,8), %xmm3 # xmm3 = mem[0],zero
movapd %xmm4, %xmm0
subsd %xmm3, %xmm0
andpd %xmm1, %xmm0
ucomisd %xmm0, %xmm2
jae .LBB3_24
# %bb.8: # in Loop: Header=BB3_7 Depth=1
movl $_ZSt4cout, %edi
movl $.L.str.5, %esi
movl $19, %edx
movsd %xmm3, 8(%rsp) # 8-byte Spill
movsd %xmm4, 16(%rsp) # 8-byte Spill
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl %r15d, %esi
callq _ZNSolsEi
movq %rax, %r12
movl $.L.str.6, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq %r12, %rdi
movsd 8(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
callq _ZNSo9_M_insertIdEERSoT_
movq %rax, %r12
movl $.L.str.2, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq %r12, %rdi
movsd 16(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
callq _ZNSo9_M_insertIdEERSoT_
movq %rax, %r12
movl $.L.str.2, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movsd 8(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
subsd 16(%rsp), %xmm0 # 8-byte Folded Reload
movq %r12, %rdi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r12
testq %r12, %r12
je .LBB3_25
# %bb.9: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i51
# in Loop: Header=BB3_7 Depth=1
cmpb $0, 56(%r12)
je .LBB3_22
# %bb.10: # in Loop: Header=BB3_7 Depth=1
movzbl 67(%r12), %ecx
jmp .LBB3_23
.LBB3_13: # %._crit_edge62
movl $_ZSt4cout, %edi
movl $.L.str.7, %esi
movl $14, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movsd (%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB3_25
# %bb.14: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%rbx)
je .LBB3_16
# %bb.15:
movzbl 67(%rbx), %ecx
jmp .LBB3_17
.LBB3_16:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB3_17: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $_ZSt4cout, %edi
movl $.L.str.8, %esi
movl $14, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movsd (%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB3_25
# %bb.18: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i46
cmpb $0, 56(%rbx)
je .LBB3_20
# %bb.19:
movzbl 67(%rbx), %ecx
jmp .LBB3_21
.LBB3_20:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB3_21: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit49
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
xorl %eax, %eax
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB3_25:
.cfi_def_cfa_offset 80
callq _ZSt16__throw_bad_castv
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z23dev_calculate_GaussiansPddd, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z17dev_reduce_vectorPdS_, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Error code "
.size .L.str, 12
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz " : "
.size .L.str.1, 4
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz " "
.size .L.str.2, 2
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz " on line "
.size .L.str.3, 10
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz ", aborting.\n"
.size .L.str.4, 13
.type _Z23dev_calculate_GaussiansPddd,@object # @_Z23dev_calculate_GaussiansPddd
.section .rodata,"a",@progbits
.globl _Z23dev_calculate_GaussiansPddd
.p2align 3, 0x0
_Z23dev_calculate_GaussiansPddd:
.quad _Z38__device_stub__dev_calculate_GaussiansPddd
.size _Z23dev_calculate_GaussiansPddd, 8
.type _Z17dev_reduce_vectorPdS_,@object # @_Z17dev_reduce_vectorPdS_
.globl _Z17dev_reduce_vectorPdS_
.p2align 3, 0x0
_Z17dev_reduce_vectorPdS_:
.quad _Z32__device_stub__dev_reduce_vectorPdS_
.size _Z17dev_reduce_vectorPdS_, 8
.type .L.str.5,@object # @.str.5
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.5:
.asciz "Problem with entry "
.size .L.str.5, 20
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz ": "
.size .L.str.6, 3
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "Sum from CPU: "
.size .L.str.7, 15
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "Sum from GPU: "
.size .L.str.8, 15
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z23dev_calculate_GaussiansPddd"
.size .L__unnamed_1, 32
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z17dev_reduce_vectorPdS_"
.size .L__unnamed_2, 26
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z38__device_stub__dev_calculate_GaussiansPddd
.addrsig_sym _Z32__device_stub__dev_reduce_vectorPdS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _ZSt4cout
.addrsig_sym _Z23dev_calculate_GaussiansPddd
.addrsig_sym _Z17dev_reduce_vectorPdS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <cuda_runtime.h>
// O(N^2) all-pairs force accumulation with a softened inverse-square law:
// each thread owns one particle `idx` and sums contributions from all N
// particles. Launch layout: 1D grid of 1D blocks, one thread per particle.
// p: device array of N positions; f: device array of N force outputs.
__global__ void computeForcesKernel(int N, const double3 *p, double3 *f) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N)
        return;
    // Hoist this thread's own position out of the loop. Without __restrict__
    // the compiler must assume f may alias p, so it cannot perform this
    // loop-invariant-load hoist itself; doing it by hand removes three
    // redundant global loads per iteration. (f[idx] is only written after
    // the loop, so behavior is unchanged even under aliasing.)
    const double px = p[idx].x;
    const double py = p[idx].y;
    const double pz = p[idx].z;
    double3 ftot{0.0, 0.0, 0.0};
    for (int i = 0; i < N; ++i) {
        double dx = p[i].x - px;
        double dy = p[i].y - py;
        double dz = p[i].z - pz;
        // The 1e-150 softening keeps the i == idx self-term finite; since
        // dx,dy,dz are all zero there, its contribution is exactly 0.
        double inv_r = 1 / sqrt(1e-150 + dx * dx + dy * dy + dz * dz);
        double inv_rrr = inv_r * inv_r * inv_r;
        ftot.x += dx * inv_rrr;
        ftot.y += dy * inv_rrr;
        ftot.z += dz * inv_rrr;
    }
    f[idx] = ftot;
}
void computeForces(int N, const double3 *p, double3 *f) {
constexpr int numThreads = 1024;
int numBlocks = (N + numThreads - 1) / numThreads;
computeForcesKernel<<<numBlocks, numThreads>>>(N, p, f);
} | .file "tmpxft_001ada08_00000000-6_nbody_b.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z51__device_stub__Z19computeForcesKerneliPK7double3PS_iPK7double3PS_
.type _Z51__device_stub__Z19computeForcesKerneliPK7double3PS_iPK7double3PS_, @function
_Z51__device_stub__Z19computeForcesKerneliPK7double3PS_iPK7double3PS_:
.LFB2052:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movl %edi, 28(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z19computeForcesKerneliPK7double3PS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2052:
.size _Z51__device_stub__Z19computeForcesKerneliPK7double3PS_iPK7double3PS_, .-_Z51__device_stub__Z19computeForcesKerneliPK7double3PS_iPK7double3PS_
.globl _Z19computeForcesKerneliPK7double3PS_
.type _Z19computeForcesKerneliPK7double3PS_, @function
_Z19computeForcesKerneliPK7double3PS_:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z51__device_stub__Z19computeForcesKerneliPK7double3PS_iPK7double3PS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _Z19computeForcesKerneliPK7double3PS_, .-_Z19computeForcesKerneliPK7double3PS_
.globl _Z13computeForcesiPK7double3PS_
.type _Z13computeForcesiPK7double3PS_, @function
_Z13computeForcesiPK7double3PS_:
.LFB2027:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $32, %rsp
.cfi_def_cfa_offset 64
movl %edi, %ebx
movq %rsi, %rbp
movq %rdx, %r12
movl $1024, 20(%rsp)
movl $1, 24(%rsp)
leal 2046(%rdi), %eax
movl %edi, %edx
addl $1023, %edx
cmovns %edx, %eax
sarl $10, %eax
movl %eax, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L14
.L11:
addq $32, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L14:
.cfi_restore_state
movq %r12, %rdx
movq %rbp, %rsi
movl %ebx, %edi
call _Z51__device_stub__Z19computeForcesKerneliPK7double3PS_iPK7double3PS_
jmp .L11
.cfi_endproc
.LFE2027:
.size _Z13computeForcesiPK7double3PS_, .-_Z13computeForcesiPK7double3PS_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z19computeForcesKerneliPK7double3PS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z19computeForcesKerneliPK7double3PS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <cuda_runtime.h>
__global__ void computeForcesKernel(int N, const double3 *p, double3 *f) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
double3 ftot{0.0, 0.0, 0.0};
for (int i = 0; i < N; ++i) {
double dx = p[i].x - p[idx].x;
double dy = p[i].y - p[idx].y;
double dz = p[i].z - p[idx].z;
double inv_r = 1 / sqrt(1e-150 + dx * dx + dy * dy + dz * dz);
double inv_rrr = inv_r * inv_r * inv_r;
ftot.x += dx * inv_rrr;
ftot.y += dy * inv_rrr;
ftot.z += dz * inv_rrr;
}
f[idx] = ftot;
}
void computeForces(int N, const double3 *p, double3 *f) {
constexpr int numThreads = 1024;
int numBlocks = (N + numThreads - 1) / numThreads;
computeForcesKernel<<<numBlocks, numThreads>>>(N, p, f);
} | #include <hip/hip_runtime.h>
__global__ void computeForcesKernel(int N, const double3 *p, double3 *f) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
double3 ftot{0.0, 0.0, 0.0};
for (int i = 0; i < N; ++i) {
double dx = p[i].x - p[idx].x;
double dy = p[i].y - p[idx].y;
double dz = p[i].z - p[idx].z;
double inv_r = 1 / sqrt(1e-150 + dx * dx + dy * dy + dz * dz);
double inv_rrr = inv_r * inv_r * inv_r;
ftot.x += dx * inv_rrr;
ftot.y += dy * inv_rrr;
ftot.z += dz * inv_rrr;
}
f[idx] = ftot;
}
void computeForces(int N, const double3 *p, double3 *f) {
constexpr int numThreads = 1024;
int numBlocks = (N + numThreads - 1) / numThreads;
computeForcesKernel<<<numBlocks, numThreads>>>(N, p, f);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
__global__ void computeForcesKernel(int N, const double3 *p, double3 *f) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
double3 ftot{0.0, 0.0, 0.0};
for (int i = 0; i < N; ++i) {
double dx = p[i].x - p[idx].x;
double dy = p[i].y - p[idx].y;
double dz = p[i].z - p[idx].z;
double inv_r = 1 / sqrt(1e-150 + dx * dx + dy * dy + dz * dz);
double inv_rrr = inv_r * inv_r * inv_r;
ftot.x += dx * inv_rrr;
ftot.y += dy * inv_rrr;
ftot.z += dz * inv_rrr;
}
f[idx] = ftot;
}
void computeForces(int N, const double3 *p, double3 *f) {
constexpr int numThreads = 1024;
int numBlocks = (N + numThreads - 1) / numThreads;
computeForcesKernel<<<numBlocks, numThreads>>>(N, p, f);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z19computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_
.globl _Z19computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_
.p2align 8
.type _Z19computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_,@function
_Z19computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s6, s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[8:9], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s6, v8
s_cbranch_execz .LBB0_5
v_mov_b32_e32 v0, 0
v_mov_b32_e32 v1, 0
s_cmp_lt_i32 s6, 1
s_delay_alu instid0(VALU_DEP_1)
v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
s_cbranch_scc1 .LBB0_4
s_load_b64 s[2:3], s[0:1], 0x8
s_mov_b32 s5, 0x20ca2fe7
s_mov_b32 s4, 0x6a3f9475
s_waitcnt lgkmcnt(0)
v_mad_i64_i32 v[0:1], null, v8, 24, s[2:3]
s_add_u32 s2, s2, 8
s_addc_u32 s3, s3, 0
s_clause 0x1
global_load_b128 v[4:7], v[0:1], off
global_load_b64 v[11:12], v[0:1], off offset:16
v_mov_b32_e32 v0, 0
v_mov_b32_e32 v1, 0
s_delay_alu instid0(VALU_DEP_1)
v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
.LBB0_3:
s_add_u32 s8, s2, -8
s_addc_u32 s9, s3, -1
s_add_i32 s6, s6, -1
s_clause 0x1
s_load_b64 s[12:13], s[8:9], 0x0
s_load_b128 s[8:11], s[2:3], 0x0
s_add_u32 s2, s2, 24
s_addc_u32 s3, s3, 0
s_cmp_eq_u32 s6, 0
s_waitcnt vmcnt(1) lgkmcnt(0)
v_add_f64 v[13:14], s[12:13], -v[4:5]
v_add_f64 v[15:16], s[8:9], -v[6:7]
s_waitcnt vmcnt(0)
v_add_f64 v[19:20], s[10:11], -v[11:12]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[17:18], v[13:14], v[13:14], s[4:5]
v_fma_f64 v[17:18], v[15:16], v[15:16], v[17:18]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[17:18], v[19:20], v[19:20], v[17:18]
v_cmp_gt_f64_e32 vcc_lo, 0x10000000, v[17:18]
v_cndmask_b32_e64 v21, 0, 1, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b32_e32 v21, 8, v21
v_ldexp_f64 v[17:18], v[17:18], v21
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_rsq_f64_e32 v[21:22], v[17:18]
s_waitcnt_depctr 0xfff
v_mul_f64 v[23:24], v[17:18], v[21:22]
v_mul_f64 v[21:22], v[21:22], 0.5
v_fma_f64 v[25:26], -v[21:22], v[23:24], 0.5
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_fma_f64 v[23:24], v[23:24], v[25:26], v[23:24]
v_fma_f64 v[21:22], v[21:22], v[25:26], v[21:22]
v_fma_f64 v[25:26], -v[23:24], v[23:24], v[17:18]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[23:24], v[25:26], v[21:22], v[23:24]
v_fma_f64 v[25:26], -v[23:24], v[23:24], v[17:18]
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_fma_f64 v[21:22], v[25:26], v[21:22], v[23:24]
v_cndmask_b32_e64 v23, 0, 0xffffff80, vcc_lo
v_cmp_class_f64_e64 vcc_lo, v[17:18], 0x260
v_ldexp_f64 v[21:22], v[21:22], v23
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_cndmask_b32 v18, v22, v18 :: v_dual_cndmask_b32 v17, v21, v17
v_div_scale_f64 v[21:22], null, v[17:18], v[17:18], 1.0
v_div_scale_f64 v[27:28], vcc_lo, 1.0, v[17:18], 1.0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_f64_e32 v[23:24], v[21:22]
s_waitcnt_depctr 0xfff
v_fma_f64 v[25:26], -v[21:22], v[23:24], 1.0
v_fma_f64 v[23:24], v[23:24], v[25:26], v[23:24]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[25:26], -v[21:22], v[23:24], 1.0
v_fma_f64 v[23:24], v[23:24], v[25:26], v[23:24]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f64 v[25:26], v[27:28], v[23:24]
v_fma_f64 v[21:22], -v[21:22], v[25:26], v[27:28]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_fmas_f64 v[21:22], v[21:22], v[23:24], v[25:26]
v_div_fixup_f64 v[17:18], v[21:22], v[17:18], 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f64 v[21:22], v[17:18], v[17:18]
v_mul_f64 v[17:18], v[17:18], v[21:22]
s_delay_alu instid0(VALU_DEP_1)
v_fma_f64 v[0:1], v[13:14], v[17:18], v[0:1]
v_fma_f64 v[2:3], v[15:16], v[17:18], v[2:3]
v_fma_f64 v[9:10], v[19:20], v[17:18], v[9:10]
s_cbranch_scc0 .LBB0_3
.LBB0_4:
s_load_b64 s[0:1], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
v_mad_i64_i32 v[4:5], null, v8, 24, s[0:1]
s_clause 0x1
global_store_b128 v[4:5], v[0:3], off
global_store_b64 v[4:5], v[9:10], off offset:16
.LBB0_5:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z19computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 29
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z19computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_, .Lfunc_end0-_Z19computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z19computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z19computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 29
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
__global__ void computeForcesKernel(int N, const double3 *p, double3 *f) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
double3 ftot{0.0, 0.0, 0.0};
for (int i = 0; i < N; ++i) {
double dx = p[i].x - p[idx].x;
double dy = p[i].y - p[idx].y;
double dz = p[i].z - p[idx].z;
double inv_r = 1 / sqrt(1e-150 + dx * dx + dy * dy + dz * dz);
double inv_rrr = inv_r * inv_r * inv_r;
ftot.x += dx * inv_rrr;
ftot.y += dy * inv_rrr;
ftot.z += dz * inv_rrr;
}
f[idx] = ftot;
}
void computeForces(int N, const double3 *p, double3 *f) {
constexpr int numThreads = 1024;
int numBlocks = (N + numThreads - 1) / numThreads;
computeForcesKernel<<<numBlocks, numThreads>>>(N, p, f);
} | .text
.file "nbody_b.hip"
.globl _Z34__device_stub__computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_ # -- Begin function _Z34__device_stub__computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_
.p2align 4, 0x90
.type _Z34__device_stub__computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_,@function
_Z34__device_stub__computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_: # @_Z34__device_stub__computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movl %edi, 12(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 72(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z19computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z34__device_stub__computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_, .Lfunc_end0-_Z34__device_stub__computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_
.cfi_endproc
# -- End function
.globl _Z13computeForcesiPK15HIP_vector_typeIdLj3EEPS0_ # -- Begin function _Z13computeForcesiPK15HIP_vector_typeIdLj3EEPS0_
.p2align 4, 0x90
.type _Z13computeForcesiPK15HIP_vector_typeIdLj3EEPS0_,@function
_Z13computeForcesiPK15HIP_vector_typeIdLj3EEPS0_: # @_Z13computeForcesiPK15HIP_vector_typeIdLj3EEPS0_
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $112, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdx, %rbx
movq %rsi, %r14
movl %edi, %r15d
leal 1023(%r15), %eax
leal 2046(%r15), %edi
testl %eax, %eax
cmovnsl %eax, %edi
sarl $10, %edi
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rdi
orq $1024, %rdx # imm = 0x400
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movl %r15d, 12(%rsp)
movq %r14, 72(%rsp)
movq %rbx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 72(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z19computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
addq $112, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z13computeForcesiPK15HIP_vector_typeIdLj3EEPS0_, .Lfunc_end1-_Z13computeForcesiPK15HIP_vector_typeIdLj3EEPS0_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z19computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z19computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_,@object # @_Z19computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_
.section .rodata,"a",@progbits
.globl _Z19computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_
.p2align 3, 0x0
_Z19computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_:
.quad _Z34__device_stub__computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_
.size _Z19computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z19computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_"
.size .L__unnamed_1, 55
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z34__device_stub__computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z19computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_001ada08_00000000-6_nbody_b.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z51__device_stub__Z19computeForcesKerneliPK7double3PS_iPK7double3PS_
.type _Z51__device_stub__Z19computeForcesKerneliPK7double3PS_iPK7double3PS_, @function
_Z51__device_stub__Z19computeForcesKerneliPK7double3PS_iPK7double3PS_:
.LFB2052:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movl %edi, 28(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z19computeForcesKerneliPK7double3PS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2052:
.size _Z51__device_stub__Z19computeForcesKerneliPK7double3PS_iPK7double3PS_, .-_Z51__device_stub__Z19computeForcesKerneliPK7double3PS_iPK7double3PS_
.globl _Z19computeForcesKerneliPK7double3PS_
.type _Z19computeForcesKerneliPK7double3PS_, @function
_Z19computeForcesKerneliPK7double3PS_:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z51__device_stub__Z19computeForcesKerneliPK7double3PS_iPK7double3PS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _Z19computeForcesKerneliPK7double3PS_, .-_Z19computeForcesKerneliPK7double3PS_
.globl _Z13computeForcesiPK7double3PS_
.type _Z13computeForcesiPK7double3PS_, @function
_Z13computeForcesiPK7double3PS_:
.LFB2027:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $32, %rsp
.cfi_def_cfa_offset 64
movl %edi, %ebx
movq %rsi, %rbp
movq %rdx, %r12
movl $1024, 20(%rsp)
movl $1, 24(%rsp)
leal 2046(%rdi), %eax
movl %edi, %edx
addl $1023, %edx
cmovns %edx, %eax
sarl $10, %eax
movl %eax, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L14
.L11:
addq $32, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L14:
.cfi_restore_state
movq %r12, %rdx
movq %rbp, %rsi
movl %ebx, %edi
call _Z51__device_stub__Z19computeForcesKerneliPK7double3PS_iPK7double3PS_
jmp .L11
.cfi_endproc
.LFE2027:
.size _Z13computeForcesiPK7double3PS_, .-_Z13computeForcesiPK7double3PS_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z19computeForcesKerneliPK7double3PS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z19computeForcesKerneliPK7double3PS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "nbody_b.hip"
.globl _Z34__device_stub__computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_ # -- Begin function _Z34__device_stub__computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_
.p2align 4, 0x90
.type _Z34__device_stub__computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_,@function
_Z34__device_stub__computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_: # @_Z34__device_stub__computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movl %edi, 12(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 72(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z19computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z34__device_stub__computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_, .Lfunc_end0-_Z34__device_stub__computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_
.cfi_endproc
# -- End function
.globl _Z13computeForcesiPK15HIP_vector_typeIdLj3EEPS0_ # -- Begin function _Z13computeForcesiPK15HIP_vector_typeIdLj3EEPS0_
.p2align 4, 0x90
.type _Z13computeForcesiPK15HIP_vector_typeIdLj3EEPS0_,@function
_Z13computeForcesiPK15HIP_vector_typeIdLj3EEPS0_: # @_Z13computeForcesiPK15HIP_vector_typeIdLj3EEPS0_
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $112, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdx, %rbx
movq %rsi, %r14
movl %edi, %r15d
leal 1023(%r15), %eax
leal 2046(%r15), %edi
testl %eax, %eax
cmovnsl %eax, %edi
sarl $10, %edi
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rdi
orq $1024, %rdx # imm = 0x400
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movl %r15d, 12(%rsp)
movq %r14, 72(%rsp)
movq %rbx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 72(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z19computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
addq $112, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z13computeForcesiPK15HIP_vector_typeIdLj3EEPS0_, .Lfunc_end1-_Z13computeForcesiPK15HIP_vector_typeIdLj3EEPS0_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z19computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z19computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_,@object # @_Z19computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_
.section .rodata,"a",@progbits
.globl _Z19computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_
.p2align 3, 0x0
_Z19computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_:
.quad _Z34__device_stub__computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_
.size _Z19computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z19computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_"
.size .L__unnamed_1, 55
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z34__device_stub__computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z19computeForcesKerneliPK15HIP_vector_typeIdLj3EEPS0_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void cudaSReduceIndex_kernel( const unsigned int inputSize, const unsigned int inputBatchOffset, const unsigned int outputBatchOffset, const float* valueThreshold, const float* inputs, int* outputMap, float* scores)
{
const int batchPos = blockIdx.z;
const int clsPos = blockIdx.y;
const int index = (threadIdx.x & 0x1f) + blockIdx.x*blockDim.x;
const int inputIndex = index
+ inputSize*clsPos
+ batchPos*inputBatchOffset;
const int outputIndex = index
+ inputSize*clsPos
+ batchPos*outputBatchOffset;
if(index < inputSize)
{
float value = inputs[inputIndex];
if(value >= valueThreshold[clsPos])
{
outputMap[outputIndex] = index;
scores[outputIndex] = value;
}
else
{
outputMap[outputIndex] = -1;
scores[outputIndex] = -1.0;
}
}
} | code for sm_80
Function : _Z23cudaSReduceIndex_kerneljjjPKfS0_PiPf
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e280000002100 */
/*0020*/ S2R R11, SR_CTAID.X ; /* 0x00000000000b7919 */
/* 0x000e620000002500 */
/*0030*/ LOP3.LUT R0, R0, 0x1f, RZ, 0xc0, !PT ; /* 0x0000001f00007812 */
/* 0x001fca00078ec0ff */
/*0040*/ IMAD R11, R11, c[0x0][0x0], R0 ; /* 0x000000000b0b7a24 */
/* 0x002fca00078e0200 */
/*0050*/ ISETP.GE.U32.AND P0, PT, R11, c[0x0][0x160], PT ; /* 0x000058000b007a0c */
/* 0x000fda0003f06070 */
/*0060*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0070*/ S2R R4, SR_CTAID.Y ; /* 0x0000000000047919 */
/* 0x000e220000002600 */
/*0080*/ HFMA2.MMA R9, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff097435 */
/* 0x000fe200000001ff */
/*0090*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*00a0*/ S2R R7, SR_CTAID.Z ; /* 0x0000000000077919 */
/* 0x000e620000002700 */
/*00b0*/ IMAD R0, R4, c[0x0][0x160], R11 ; /* 0x0000580004007a24 */
/* 0x001fcc00078e020b */
/*00c0*/ IMAD.WIDE R4, R4, R9, c[0x0][0x170] ; /* 0x00005c0004047625 */
/* 0x000fc800078e0209 */
/*00d0*/ IMAD R2, R7, c[0x0][0x164], R0 ; /* 0x0000590007027a24 */
/* 0x002fe400078e0200 */
/*00e0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea4000c1e1900 */
/*00f0*/ IMAD.WIDE R2, R2, R9, c[0x0][0x178] ; /* 0x00005e0002027625 */
/* 0x000fcc00078e0209 */
/*0100*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*0110*/ IMAD R0, R7, c[0x0][0x168], R0 ; /* 0x00005a0007007a24 */
/* 0x000fc800078e0200 */
/*0120*/ IMAD.WIDE R6, R0, R9, c[0x0][0x180] ; /* 0x0000600000067625 */
/* 0x000fc800078e0209 */
/*0130*/ IMAD.WIDE R8, R0, R9, c[0x0][0x188] ; /* 0x0000620000087625 */
/* 0x000fe200078e0209 */
/*0140*/ FSETP.GE.AND P0, PT, R3, R4, PT ; /* 0x000000040300720b */
/* 0x004fda0003f06000 */
/*0150*/ @P0 STG.E [R6.64], R11 ; /* 0x0000000b06000986 */
/* 0x0001e8000c101904 */
/*0160*/ @P0 STG.E [R8.64], R3 ; /* 0x0000000308000986 */
/* 0x0001e2000c101904 */
/*0170*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0180*/ MOV R3, 0xffffffff ; /* 0xffffffff00037802 */
/* 0x001fe40000000f00 */
/*0190*/ MOV R5, 0xbf800000 ; /* 0xbf80000000057802 */
/* 0x000fc60000000f00 */
/*01a0*/ STG.E [R6.64], R3 ; /* 0x0000000306007986 */
/* 0x000fe8000c101904 */
/*01b0*/ STG.E [R8.64], R5 ; /* 0x0000000508007986 */
/* 0x000fe2000c101904 */
/*01c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01d0*/ BRA 0x1d0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void cudaSReduceIndex_kernel( const unsigned int inputSize, const unsigned int inputBatchOffset, const unsigned int outputBatchOffset, const float* valueThreshold, const float* inputs, int* outputMap, float* scores)
{
const int batchPos = blockIdx.z;
const int clsPos = blockIdx.y;
const int index = (threadIdx.x & 0x1f) + blockIdx.x*blockDim.x;
const int inputIndex = index
+ inputSize*clsPos
+ batchPos*inputBatchOffset;
const int outputIndex = index
+ inputSize*clsPos
+ batchPos*outputBatchOffset;
if(index < inputSize)
{
float value = inputs[inputIndex];
if(value >= valueThreshold[clsPos])
{
outputMap[outputIndex] = index;
scores[outputIndex] = value;
}
else
{
outputMap[outputIndex] = -1;
scores[outputIndex] = -1.0;
}
}
} | .file "tmpxft_000ba4d8_00000000-6_cudaSReduceIndex_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z54__device_stub__Z23cudaSReduceIndex_kerneljjjPKfS0_PiPfjjjPKfS0_PiPf
.type _Z54__device_stub__Z23cudaSReduceIndex_kerneljjjPKfS0_PiPfjjjPKfS0_PiPf, @function
_Z54__device_stub__Z23cudaSReduceIndex_kerneljjjPKfS0_PiPfjjjPKfS0_PiPf:
.LFB2051:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movl %edi, 44(%rsp)
movl %esi, 40(%rsp)
movl %edx, 36(%rsp)
movq %rcx, 24(%rsp)
movq %r8, 16(%rsp)
movq %r9, 8(%rsp)
movq 192(%rsp), %rax
movq %rax, (%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 44(%rsp), %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rax
movq %rax, 120(%rsp)
leaq 36(%rsp), %rax
movq %rax, 128(%rsp)
leaq 24(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
movq %rsp, %rax
movq %rax, 160(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z23cudaSReduceIndex_kerneljjjPKfS0_PiPf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z54__device_stub__Z23cudaSReduceIndex_kerneljjjPKfS0_PiPfjjjPKfS0_PiPf, .-_Z54__device_stub__Z23cudaSReduceIndex_kerneljjjPKfS0_PiPfjjjPKfS0_PiPf
.globl _Z23cudaSReduceIndex_kerneljjjPKfS0_PiPf
.type _Z23cudaSReduceIndex_kerneljjjPKfS0_PiPf, @function
_Z23cudaSReduceIndex_kerneljjjPKfS0_PiPf:
.LFB2052:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
pushq 24(%rsp)
.cfi_def_cfa_offset 32
call _Z54__device_stub__Z23cudaSReduceIndex_kerneljjjPKfS0_PiPfjjjPKfS0_PiPf
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z23cudaSReduceIndex_kerneljjjPKfS0_PiPf, .-_Z23cudaSReduceIndex_kerneljjjPKfS0_PiPf
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z23cudaSReduceIndex_kerneljjjPKfS0_PiPf"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z23cudaSReduceIndex_kerneljjjPKfS0_PiPf(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void cudaSReduceIndex_kernel( const unsigned int inputSize, const unsigned int inputBatchOffset, const unsigned int outputBatchOffset, const float* valueThreshold, const float* inputs, int* outputMap, float* scores)
{
const int batchPos = blockIdx.z;
const int clsPos = blockIdx.y;
const int index = (threadIdx.x & 0x1f) + blockIdx.x*blockDim.x;
const int inputIndex = index
+ inputSize*clsPos
+ batchPos*inputBatchOffset;
const int outputIndex = index
+ inputSize*clsPos
+ batchPos*outputBatchOffset;
if(index < inputSize)
{
float value = inputs[inputIndex];
if(value >= valueThreshold[clsPos])
{
outputMap[outputIndex] = index;
scores[outputIndex] = value;
}
else
{
outputMap[outputIndex] = -1;
scores[outputIndex] = -1.0;
}
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void cudaSReduceIndex_kernel( const unsigned int inputSize, const unsigned int inputBatchOffset, const unsigned int outputBatchOffset, const float* valueThreshold, const float* inputs, int* outputMap, float* scores)
{
const int batchPos = blockIdx.z;
const int clsPos = blockIdx.y;
const int index = (threadIdx.x & 0x1f) + blockIdx.x*blockDim.x;
const int inputIndex = index
+ inputSize*clsPos
+ batchPos*inputBatchOffset;
const int outputIndex = index
+ inputSize*clsPos
+ batchPos*outputBatchOffset;
if(index < inputSize)
{
float value = inputs[inputIndex];
if(value >= valueThreshold[clsPos])
{
outputMap[outputIndex] = index;
scores[outputIndex] = value;
}
else
{
outputMap[outputIndex] = -1;
scores[outputIndex] = -1.0;
}
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void cudaSReduceIndex_kernel( const unsigned int inputSize, const unsigned int inputBatchOffset, const unsigned int outputBatchOffset, const float* valueThreshold, const float* inputs, int* outputMap, float* scores)
{
const int batchPos = blockIdx.z;
const int clsPos = blockIdx.y;
const int index = (threadIdx.x & 0x1f) + blockIdx.x*blockDim.x;
const int inputIndex = index
+ inputSize*clsPos
+ batchPos*inputBatchOffset;
const int outputIndex = index
+ inputSize*clsPos
+ batchPos*outputBatchOffset;
if(index < inputSize)
{
float value = inputs[inputIndex];
if(value >= valueThreshold[clsPos])
{
outputMap[outputIndex] = index;
scores[outputIndex] = value;
}
else
{
outputMap[outputIndex] = -1;
scores[outputIndex] = -1.0;
}
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z23cudaSReduceIndex_kerneljjjPKfS0_PiPf
.globl _Z23cudaSReduceIndex_kerneljjjPKfS0_PiPf
.p2align 8
.type _Z23cudaSReduceIndex_kerneljjjPKfS0_PiPf,@function
_Z23cudaSReduceIndex_kerneljjjPKfS0_PiPf:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x3c
s_load_b32 s2, s[0:1], 0x0
v_and_b32_e32 v2, 31, v0
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_mad_u64_u32 v[0:1], null, s13, s3, v[2:3]
s_mov_b32 s3, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_u32_e64 s2, v0
s_cbranch_execz .LBB0_2
s_load_b64 s[8:9], s[0:1], 0x4
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s14, s2, v[0:1]
s_load_b256 s[0:7], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
v_mad_u64_u32 v[2:3], null, s15, s8, v[1:2]
s_mov_b32 s8, s15
s_ashr_i32 s15, s14, 31
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[2:3], 2, v[2:3]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s2, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo
s_lshl_b64 s[2:3], s[14:15], 2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
s_add_u32 s0, s0, s2
global_load_b32 v5, v[2:3], off
v_mad_u64_u32 v[2:3], null, s8, s9, v[1:2]
s_addc_u32 s1, s1, s3
s_load_b32 s0, s[0:1], 0x0
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[2:3]
v_add_co_u32 v3, vcc_lo, s4, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v2, vcc_lo
s_waitcnt vmcnt(0) lgkmcnt(0)
v_cmp_nle_f32_e32 vcc_lo, s0, v5
v_add_co_u32 v1, s0, s6, v1
v_add_co_ci_u32_e64 v2, s0, s7, v2, s0
v_cndmask_b32_e64 v0, v0, -1, vcc_lo
v_cndmask_b32_e64 v5, v5, -1.0, vcc_lo
global_store_b32 v[3:4], v0, off
global_store_b32 v[1:2], v5, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z23cudaSReduceIndex_kerneljjjPKfS0_PiPf
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 304
.amdhsa_user_sgpr_count 13
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 1
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z23cudaSReduceIndex_kerneljjjPKfS0_PiPf, .Lfunc_end0-_Z23cudaSReduceIndex_kerneljjjPKfS0_PiPf
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .offset: 8
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
- .offset: 48
.size: 4
.value_kind: hidden_block_count_x
- .offset: 52
.size: 4
.value_kind: hidden_block_count_y
- .offset: 56
.size: 4
.value_kind: hidden_block_count_z
- .offset: 60
.size: 2
.value_kind: hidden_group_size_x
- .offset: 62
.size: 2
.value_kind: hidden_group_size_y
- .offset: 64
.size: 2
.value_kind: hidden_group_size_z
- .offset: 66
.size: 2
.value_kind: hidden_remainder_x
- .offset: 68
.size: 2
.value_kind: hidden_remainder_y
- .offset: 70
.size: 2
.value_kind: hidden_remainder_z
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 112
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 304
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z23cudaSReduceIndex_kerneljjjPKfS0_PiPf
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z23cudaSReduceIndex_kerneljjjPKfS0_PiPf.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void cudaSReduceIndex_kernel( const unsigned int inputSize, const unsigned int inputBatchOffset, const unsigned int outputBatchOffset, const float* valueThreshold, const float* inputs, int* outputMap, float* scores)
{
const int batchPos = blockIdx.z;
const int clsPos = blockIdx.y;
const int index = (threadIdx.x & 0x1f) + blockIdx.x*blockDim.x;
const int inputIndex = index
+ inputSize*clsPos
+ batchPos*inputBatchOffset;
const int outputIndex = index
+ inputSize*clsPos
+ batchPos*outputBatchOffset;
if(index < inputSize)
{
float value = inputs[inputIndex];
if(value >= valueThreshold[clsPos])
{
outputMap[outputIndex] = index;
scores[outputIndex] = value;
}
else
{
outputMap[outputIndex] = -1;
scores[outputIndex] = -1.0;
}
}
} | .text
.file "cudaSReduceIndex_kernel.hip"
.globl _Z38__device_stub__cudaSReduceIndex_kerneljjjPKfS0_PiPf # -- Begin function _Z38__device_stub__cudaSReduceIndex_kerneljjjPKfS0_PiPf
.p2align 4, 0x90
.type _Z38__device_stub__cudaSReduceIndex_kerneljjjPKfS0_PiPf,@function
_Z38__device_stub__cudaSReduceIndex_kerneljjjPKfS0_PiPf: # @_Z38__device_stub__cudaSReduceIndex_kerneljjjPKfS0_PiPf
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 20(%rsp)
movl %esi, 16(%rsp)
movl %edx, 12(%rsp)
movq %rcx, 88(%rsp)
movq %r8, 80(%rsp)
movq %r9, 72(%rsp)
leaq 20(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 88(%rsp), %rax
movq %rax, 120(%rsp)
leaq 80(%rsp), %rax
movq %rax, 128(%rsp)
leaq 72(%rsp), %rax
movq %rax, 136(%rsp)
leaq 160(%rsp), %rax
movq %rax, 144(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z23cudaSReduceIndex_kerneljjjPKfS0_PiPf, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z38__device_stub__cudaSReduceIndex_kerneljjjPKfS0_PiPf, .Lfunc_end0-_Z38__device_stub__cudaSReduceIndex_kerneljjjPKfS0_PiPf
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z23cudaSReduceIndex_kerneljjjPKfS0_PiPf, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z23cudaSReduceIndex_kerneljjjPKfS0_PiPf,@object # @_Z23cudaSReduceIndex_kerneljjjPKfS0_PiPf
.section .rodata,"a",@progbits
.globl _Z23cudaSReduceIndex_kerneljjjPKfS0_PiPf
.p2align 3, 0x0
_Z23cudaSReduceIndex_kerneljjjPKfS0_PiPf:
.quad _Z38__device_stub__cudaSReduceIndex_kerneljjjPKfS0_PiPf
.size _Z23cudaSReduceIndex_kerneljjjPKfS0_PiPf, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z23cudaSReduceIndex_kerneljjjPKfS0_PiPf"
.size .L__unnamed_1, 41
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z38__device_stub__cudaSReduceIndex_kerneljjjPKfS0_PiPf
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z23cudaSReduceIndex_kerneljjjPKfS0_PiPf
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z23cudaSReduceIndex_kerneljjjPKfS0_PiPf
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e280000002100 */
/*0020*/ S2R R11, SR_CTAID.X ; /* 0x00000000000b7919 */
/* 0x000e620000002500 */
/*0030*/ LOP3.LUT R0, R0, 0x1f, RZ, 0xc0, !PT ; /* 0x0000001f00007812 */
/* 0x001fca00078ec0ff */
/*0040*/ IMAD R11, R11, c[0x0][0x0], R0 ; /* 0x000000000b0b7a24 */
/* 0x002fca00078e0200 */
/*0050*/ ISETP.GE.U32.AND P0, PT, R11, c[0x0][0x160], PT ; /* 0x000058000b007a0c */
/* 0x000fda0003f06070 */
/*0060*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0070*/ S2R R4, SR_CTAID.Y ; /* 0x0000000000047919 */
/* 0x000e220000002600 */
/*0080*/ HFMA2.MMA R9, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff097435 */
/* 0x000fe200000001ff */
/*0090*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*00a0*/ S2R R7, SR_CTAID.Z ; /* 0x0000000000077919 */
/* 0x000e620000002700 */
/*00b0*/ IMAD R0, R4, c[0x0][0x160], R11 ; /* 0x0000580004007a24 */
/* 0x001fcc00078e020b */
/*00c0*/ IMAD.WIDE R4, R4, R9, c[0x0][0x170] ; /* 0x00005c0004047625 */
/* 0x000fc800078e0209 */
/*00d0*/ IMAD R2, R7, c[0x0][0x164], R0 ; /* 0x0000590007027a24 */
/* 0x002fe400078e0200 */
/*00e0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea4000c1e1900 */
/*00f0*/ IMAD.WIDE R2, R2, R9, c[0x0][0x178] ; /* 0x00005e0002027625 */
/* 0x000fcc00078e0209 */
/*0100*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*0110*/ IMAD R0, R7, c[0x0][0x168], R0 ; /* 0x00005a0007007a24 */
/* 0x000fc800078e0200 */
/*0120*/ IMAD.WIDE R6, R0, R9, c[0x0][0x180] ; /* 0x0000600000067625 */
/* 0x000fc800078e0209 */
/*0130*/ IMAD.WIDE R8, R0, R9, c[0x0][0x188] ; /* 0x0000620000087625 */
/* 0x000fe200078e0209 */
/*0140*/ FSETP.GE.AND P0, PT, R3, R4, PT ; /* 0x000000040300720b */
/* 0x004fda0003f06000 */
/*0150*/ @P0 STG.E [R6.64], R11 ; /* 0x0000000b06000986 */
/* 0x0001e8000c101904 */
/*0160*/ @P0 STG.E [R8.64], R3 ; /* 0x0000000308000986 */
/* 0x0001e2000c101904 */
/*0170*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0180*/ MOV R3, 0xffffffff ; /* 0xffffffff00037802 */
/* 0x001fe40000000f00 */
/*0190*/ MOV R5, 0xbf800000 ; /* 0xbf80000000057802 */
/* 0x000fc60000000f00 */
/*01a0*/ STG.E [R6.64], R3 ; /* 0x0000000306007986 */
/* 0x000fe8000c101904 */
/*01b0*/ STG.E [R8.64], R5 ; /* 0x0000000508007986 */
/* 0x000fe2000c101904 */
/*01c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01d0*/ BRA 0x1d0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z23cudaSReduceIndex_kerneljjjPKfS0_PiPf
.globl _Z23cudaSReduceIndex_kerneljjjPKfS0_PiPf
.p2align 8
.type _Z23cudaSReduceIndex_kerneljjjPKfS0_PiPf,@function
_Z23cudaSReduceIndex_kerneljjjPKfS0_PiPf:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x3c
s_load_b32 s2, s[0:1], 0x0
v_and_b32_e32 v2, 31, v0
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_mad_u64_u32 v[0:1], null, s13, s3, v[2:3]
s_mov_b32 s3, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_u32_e64 s2, v0
s_cbranch_execz .LBB0_2
s_load_b64 s[8:9], s[0:1], 0x4
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s14, s2, v[0:1]
s_load_b256 s[0:7], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
v_mad_u64_u32 v[2:3], null, s15, s8, v[1:2]
s_mov_b32 s8, s15
s_ashr_i32 s15, s14, 31
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[2:3], 2, v[2:3]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s2, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo
s_lshl_b64 s[2:3], s[14:15], 2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
s_add_u32 s0, s0, s2
global_load_b32 v5, v[2:3], off
v_mad_u64_u32 v[2:3], null, s8, s9, v[1:2]
s_addc_u32 s1, s1, s3
s_load_b32 s0, s[0:1], 0x0
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[2:3]
v_add_co_u32 v3, vcc_lo, s4, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v2, vcc_lo
s_waitcnt vmcnt(0) lgkmcnt(0)
v_cmp_nle_f32_e32 vcc_lo, s0, v5
v_add_co_u32 v1, s0, s6, v1
v_add_co_ci_u32_e64 v2, s0, s7, v2, s0
v_cndmask_b32_e64 v0, v0, -1, vcc_lo
v_cndmask_b32_e64 v5, v5, -1.0, vcc_lo
global_store_b32 v[3:4], v0, off
global_store_b32 v[1:2], v5, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z23cudaSReduceIndex_kerneljjjPKfS0_PiPf
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 304
.amdhsa_user_sgpr_count 13
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 1
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z23cudaSReduceIndex_kerneljjjPKfS0_PiPf, .Lfunc_end0-_Z23cudaSReduceIndex_kerneljjjPKfS0_PiPf
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .offset: 8
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
- .offset: 48
.size: 4
.value_kind: hidden_block_count_x
- .offset: 52
.size: 4
.value_kind: hidden_block_count_y
- .offset: 56
.size: 4
.value_kind: hidden_block_count_z
- .offset: 60
.size: 2
.value_kind: hidden_group_size_x
- .offset: 62
.size: 2
.value_kind: hidden_group_size_y
- .offset: 64
.size: 2
.value_kind: hidden_group_size_z
- .offset: 66
.size: 2
.value_kind: hidden_remainder_x
- .offset: 68
.size: 2
.value_kind: hidden_remainder_y
- .offset: 70
.size: 2
.value_kind: hidden_remainder_z
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 112
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 304
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z23cudaSReduceIndex_kerneljjjPKfS0_PiPf
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z23cudaSReduceIndex_kerneljjjPKfS0_PiPf.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.