system stringclasses 7 values | input stringlengths 10 94.5k | output stringlengths 14 55.5k |
|---|---|---|
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
void initialize(int* input, int size) {
for (int index = 0; index < size; index++) {
input[index] = index;
}
}
int cpu_reduction(int* input, int size) {
int result = 0;
for (int index = 0; index < size; index++) {
result += index;
}
return result;
}
void showResult(int cpu_result, int gpu_result) {
printf("cpu_result: %d, gpu_result: %d\n", cpu_result, gpu_result);
}
//// 1. reduction neighbored pairs kernel
__global__ void redunction_v1(int* input, int* output, int size) {
int tid = threadIdx.x;
int gid = blockDim.x * blockIdx.x + threadIdx.x;
if (gid >= size) return;
for (int offset = 1; offset <= blockDim.x / 2; offset *= 2) {
if (tid % (2 * offset) == 0) {
input[gid] += input[gid + offset];
}
__syncthreads();
}
if (tid == 0) {
output[blockIdx.x] = input[gid];
}
}
//// 2. warp_divergence_improved of #1 reduction_v1
__global__ void reduction_v1_improved(int* input, int* output, int size) {
int tid = threadIdx.x;
int gid = blockDim.x * blockIdx.x + threadIdx.x;
// local data block pointer
int* i_data = input + blockDim.x * blockIdx.x;
if (gid >= size) return;
for (int offset = 1; offset <= blockDim.x / 2; offset *= 2) {
int index = 2 * offset * tid;
if (index < blockDim.x) {
i_data[index] += i_data[index + offset];
}
__syncthreads();
}
if (tid == 0) {
output[blockIdx.x] = input[gid];
}
}
int main(int argc, char** argv) {
// int size = 1 << 27; // 128 Mb of data
int size = 512;
dim3 block(128);
dim3 grid(size / block.x);
int* h_ref = (int*)malloc(grid.x * sizeof(int));
int gpu_result;
//// input
int* h_input;
h_input = (int*)malloc(size * sizeof(int));
initialize(h_input, size);
//// cpu redunction
int cpu_result = cpu_reduction(h_input, size);
//// gpu redunction
int *d_input, *d_output;
hipMalloc((void**)&d_input, size * sizeof(int));
hipMalloc((void**)&d_output, grid.x * sizeof(int));
//// #1
hipMemcpy(d_input, h_input, size * sizeof(int), hipMemcpyHostToDevice);
hipMemset(d_output, 0, grid.x * sizeof(int));
redunction_v1<<<grid, block>>>(d_input, d_output, size);
hipDeviceSynchronize();
memset(h_ref, 0, grid.x * sizeof(int));
hipMemcpy(h_ref, d_output, grid.x * sizeof(int), hipMemcpyDeviceToHost);
gpu_result = 0;
for (int i = 0; i < grid.x; i++) {
gpu_result += h_ref[i];
}
showResult(cpu_result, gpu_result);
//// #2
hipMemcpy(d_input, h_input, size * sizeof(int), hipMemcpyHostToDevice);
hipMemset(d_output, 0, grid.x * sizeof(int));
reduction_v1_improved<<<grid, block>>>(d_input, d_output, size);
hipDeviceSynchronize();
memset(h_ref, 0, grid.x * sizeof(int));
hipMemcpy(h_ref, d_output, grid.x * sizeof(int), hipMemcpyDeviceToHost);
gpu_result = 0;
for (int i = 0; i < grid.x; i++) {
gpu_result += h_ref[i];
}
showResult(cpu_result, gpu_result);
hipFree(d_output);
hipFree(d_input);
free(h_ref);
free(h_input);
hipDeviceReset();
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z13redunction_v1PiS_i
.globl _Z13redunction_v1PiS_i
.p2align 8
.type _Z13redunction_v1PiS_i,@function
_Z13redunction_v1PiS_i:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x24
s_load_b32 s4, s[0:1], 0x10
s_mov_b32 s2, s15
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s2, s3, v[0:1]
v_cmp_gt_i32_e32 vcc_lo, s4, v1
s_and_saveexec_b32 s4, vcc_lo
s_cbranch_execz .LBB0_8
s_load_b64 s[4:5], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_cmp_lt_u32 s3, 2
s_cbranch_scc1 .LBB0_6
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[3:4], 2, v[1:2]
s_mov_b32 s7, 1
s_lshr_b32 s3, s3, 1
s_waitcnt lgkmcnt(0)
v_add_co_u32 v3, vcc_lo, s4, v3
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_4
.p2align 6
.LBB0_3:
s_or_b32 exec_lo, exec_lo, s8
s_cmp_gt_u32 s6, s3
s_mov_b32 s7, s6
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
s_cbranch_scc1 .LBB0_6
.LBB0_4:
s_lshl_b32 s6, s7, 1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_add_i32 s8, s6, -1
v_and_b32_e32 v5, s8, v0
s_mov_b32 s8, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_eq_u32_e32 0, v5
s_cbranch_execz .LBB0_3
v_add_nc_u32_e32 v5, s7, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v6, 31, v5
v_lshlrev_b64 v[5:6], 2, v[5:6]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v5, vcc_lo, s4, v5
v_add_co_ci_u32_e32 v6, vcc_lo, s5, v6, vcc_lo
s_clause 0x1
global_load_b32 v5, v[5:6], off
global_load_b32 v6, v[3:4], off
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v5, v6, v5
global_store_b32 v[3:4], v5, off
s_branch .LBB0_3
.LBB0_6:
s_set_inst_prefetch_distance 0x2
v_cmp_eq_u32_e32 vcc_lo, 0, v0
s_mov_b32 s3, 0
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_8
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_load_b64 s[0:1], s[0:1], 0x8
s_lshl_b64 s[2:3], s[2:3], 2
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s4, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v1, vcc_lo
global_load_b32 v0, v[0:1], off
v_mov_b32_e32 v1, 0
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
s_waitcnt vmcnt(0)
global_store_b32 v1, v0, s[0:1]
.LBB0_8:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13redunction_v1PiS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z13redunction_v1PiS_i, .Lfunc_end0-_Z13redunction_v1PiS_i
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z21reduction_v1_improvedPiS_i
.globl _Z21reduction_v1_improvedPiS_i
.p2align 8
.type _Z21reduction_v1_improvedPiS_i,@function
_Z21reduction_v1_improvedPiS_i:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x24
s_load_b32 s4, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_mul_i32 s6, s15, s3
v_add_nc_u32_e32 v1, s6, v0
s_delay_alu instid0(VALU_DEP_1)
v_cmp_gt_i32_e32 vcc_lo, s4, v1
s_and_saveexec_b32 s4, vcc_lo
s_cbranch_execz .LBB1_8
s_load_b64 s[4:5], s[0:1], 0x0
s_mov_b32 s2, s15
s_cmp_lt_u32 s3, 2
s_cbranch_scc1 .LBB1_6
s_mov_b32 s7, 0
s_mov_b32 s11, 1
s_lshl_b64 s[8:9], s[6:7], 2
s_lshr_b32 s6, s3, 1
s_waitcnt lgkmcnt(0)
s_add_u32 s7, s4, s8
s_addc_u32 s8, s5, s9
s_set_inst_prefetch_distance 0x1
s_branch .LBB1_4
.p2align 6
.LBB1_3:
s_or_b32 exec_lo, exec_lo, s10
s_cmp_gt_u32 s9, s6
s_mov_b32 s11, s9
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
s_cbranch_scc1 .LBB1_6
.LBB1_4:
s_lshl_b32 s9, s11, 1
s_mov_b32 s10, exec_lo
v_mul_lo_u32 v2, s9, v0
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_u32_e64 s3, v2
s_cbranch_execz .LBB1_3
v_dual_mov_b32 v5, 0 :: v_dual_add_nc_u32 v4, s11, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mov_b32_e32 v3, v5
v_lshlrev_b64 v[4:5], 2, v[4:5]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[2:3], 2, v[2:3]
v_add_co_u32 v4, vcc_lo, s7, v4
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_ci_u32_e32 v5, vcc_lo, s8, v5, vcc_lo
v_add_co_u32 v2, vcc_lo, s7, v2
s_delay_alu instid0(VALU_DEP_4)
v_add_co_ci_u32_e32 v3, vcc_lo, s8, v3, vcc_lo
s_clause 0x1
global_load_b32 v4, v[4:5], off
global_load_b32 v5, v[2:3], off
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v4, v5, v4
global_store_b32 v[2:3], v4, off
s_branch .LBB1_3
.LBB1_6:
s_set_inst_prefetch_distance 0x2
v_cmp_eq_u32_e32 vcc_lo, 0, v0
s_mov_b32 s3, 0
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB1_8
v_ashrrev_i32_e32 v2, 31, v1
s_load_b64 s[0:1], s[0:1], 0x8
s_lshl_b64 s[2:3], s[2:3], 2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v1, vcc_lo
global_load_b32 v0, v[0:1], off
v_mov_b32_e32 v1, 0
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
s_waitcnt vmcnt(0)
global_store_b32 v1, v0, s[0:1]
.LBB1_8:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z21reduction_v1_improvedPiS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z21reduction_v1_improvedPiS_i, .Lfunc_end1-_Z21reduction_v1_improvedPiS_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13redunction_v1PiS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13redunction_v1PiS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z21reduction_v1_improvedPiS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z21reduction_v1_improvedPiS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
void initialize(int* input, int size) {
for (int index = 0; index < size; index++) {
input[index] = index;
}
}
int cpu_reduction(int* input, int size) {
int result = 0;
for (int index = 0; index < size; index++) {
result += index;
}
return result;
}
void showResult(int cpu_result, int gpu_result) {
printf("cpu_result: %d, gpu_result: %d\n", cpu_result, gpu_result);
}
//// 1. reduction neighbored pairs kernel
__global__ void redunction_v1(int* input, int* output, int size) {
int tid = threadIdx.x;
int gid = blockDim.x * blockIdx.x + threadIdx.x;
if (gid >= size) return;
for (int offset = 1; offset <= blockDim.x / 2; offset *= 2) {
if (tid % (2 * offset) == 0) {
input[gid] += input[gid + offset];
}
__syncthreads();
}
if (tid == 0) {
output[blockIdx.x] = input[gid];
}
}
//// 2. warp_divergence_improved of #1 reduction_v1
__global__ void reduction_v1_improved(int* input, int* output, int size) {
int tid = threadIdx.x;
int gid = blockDim.x * blockIdx.x + threadIdx.x;
// local data block pointer
int* i_data = input + blockDim.x * blockIdx.x;
if (gid >= size) return;
for (int offset = 1; offset <= blockDim.x / 2; offset *= 2) {
int index = 2 * offset * tid;
if (index < blockDim.x) {
i_data[index] += i_data[index + offset];
}
__syncthreads();
}
if (tid == 0) {
output[blockIdx.x] = input[gid];
}
}
int main(int argc, char** argv) {
// int size = 1 << 27; // 128 Mb of data
int size = 512;
dim3 block(128);
dim3 grid(size / block.x);
int* h_ref = (int*)malloc(grid.x * sizeof(int));
int gpu_result;
//// input
int* h_input;
h_input = (int*)malloc(size * sizeof(int));
initialize(h_input, size);
//// cpu redunction
int cpu_result = cpu_reduction(h_input, size);
//// gpu redunction
int *d_input, *d_output;
hipMalloc((void**)&d_input, size * sizeof(int));
hipMalloc((void**)&d_output, grid.x * sizeof(int));
//// #1
hipMemcpy(d_input, h_input, size * sizeof(int), hipMemcpyHostToDevice);
hipMemset(d_output, 0, grid.x * sizeof(int));
redunction_v1<<<grid, block>>>(d_input, d_output, size);
hipDeviceSynchronize();
memset(h_ref, 0, grid.x * sizeof(int));
hipMemcpy(h_ref, d_output, grid.x * sizeof(int), hipMemcpyDeviceToHost);
gpu_result = 0;
for (int i = 0; i < grid.x; i++) {
gpu_result += h_ref[i];
}
showResult(cpu_result, gpu_result);
//// #2
hipMemcpy(d_input, h_input, size * sizeof(int), hipMemcpyHostToDevice);
hipMemset(d_output, 0, grid.x * sizeof(int));
reduction_v1_improved<<<grid, block>>>(d_input, d_output, size);
hipDeviceSynchronize();
memset(h_ref, 0, grid.x * sizeof(int));
hipMemcpy(h_ref, d_output, grid.x * sizeof(int), hipMemcpyDeviceToHost);
gpu_result = 0;
for (int i = 0; i < grid.x; i++) {
gpu_result += h_ref[i];
}
showResult(cpu_result, gpu_result);
hipFree(d_output);
hipFree(d_input);
free(h_ref);
free(h_input);
hipDeviceReset();
return 0;
} | .text
.file "reduction.hip"
.globl _Z10initializePii # -- Begin function _Z10initializePii
.p2align 4, 0x90
.type _Z10initializePii,@function
_Z10initializePii: # @_Z10initializePii
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB0_3
# %bb.1: # %.lr.ph.preheader
movl %esi, %eax
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB0_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl %ecx, (%rdi,%rcx,4)
incq %rcx
cmpq %rcx, %rax
jne .LBB0_2
.LBB0_3: # %._crit_edge
retq
.Lfunc_end0:
.size _Z10initializePii, .Lfunc_end0-_Z10initializePii
.cfi_endproc
# -- End function
.globl _Z13cpu_reductionPii # -- Begin function _Z13cpu_reductionPii
.p2align 4, 0x90
.type _Z13cpu_reductionPii,@function
_Z13cpu_reductionPii: # @_Z13cpu_reductionPii
.cfi_startproc
# %bb.0:
# kill: def $esi killed $esi def $rsi
testl %esi, %esi
jle .LBB1_1
# %bb.2: # %._crit_edge.loopexit
leal -1(%rsi), %eax
leal -2(%rsi), %ecx
imulq %rax, %rcx
shrq %rcx
leal (%rsi,%rcx), %eax
decl %eax
retq
.LBB1_1:
xorl %eax, %eax
retq
.Lfunc_end1:
.size _Z13cpu_reductionPii, .Lfunc_end1-_Z13cpu_reductionPii
.cfi_endproc
# -- End function
.globl _Z10showResultii # -- Begin function _Z10showResultii
.p2align 4, 0x90
.type _Z10showResultii,@function
_Z10showResultii: # @_Z10showResultii
.cfi_startproc
# %bb.0:
movl %esi, %edx
movl %edi, %esi
movl $.L.str, %edi
xorl %eax, %eax
jmp printf # TAILCALL
.Lfunc_end2:
.size _Z10showResultii, .Lfunc_end2-_Z10showResultii
.cfi_endproc
# -- End function
.globl _Z28__device_stub__redunction_v1PiS_i # -- Begin function _Z28__device_stub__redunction_v1PiS_i
.p2align 4, 0x90
.type _Z28__device_stub__redunction_v1PiS_i,@function
_Z28__device_stub__redunction_v1PiS_i: # @_Z28__device_stub__redunction_v1PiS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z13redunction_v1PiS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end3:
.size _Z28__device_stub__redunction_v1PiS_i, .Lfunc_end3-_Z28__device_stub__redunction_v1PiS_i
.cfi_endproc
# -- End function
.globl _Z36__device_stub__reduction_v1_improvedPiS_i # -- Begin function _Z36__device_stub__reduction_v1_improvedPiS_i
.p2align 4, 0x90
.type _Z36__device_stub__reduction_v1_improvedPiS_i,@function
_Z36__device_stub__reduction_v1_improvedPiS_i: # @_Z36__device_stub__reduction_v1_improvedPiS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z21reduction_v1_improvedPiS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end4:
.size _Z36__device_stub__reduction_v1_improvedPiS_i, .Lfunc_end4-_Z36__device_stub__reduction_v1_improvedPiS_i
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $120, %rsp
.cfi_def_cfa_offset 176
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $16, %edi
callq malloc
movq %rax, %rbx
movl $2048, %edi # imm = 0x800
callq malloc
movq %rax, %r14
xorl %eax, %eax
.p2align 4, 0x90
.LBB5_1: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
movl %eax, (%r14,%rax,4)
incq %rax
cmpq $512, %rax # imm = 0x200
jne .LBB5_1
# %bb.2: # %_Z10initializePii.exit
movabsq $4294967300, %r15 # imm = 0x100000004
leaq 24(%rsp), %rdi
movl $2048, %esi # imm = 0x800
callq hipMalloc
leaq 8(%rsp), %rdi
movl $16, %esi
callq hipMalloc
movq 24(%rsp), %rdi
movl $2048, %edx # imm = 0x800
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
xorl %ebp, %ebp
movl $16, %edx
xorl %esi, %esi
callq hipMemset
leaq 124(%r15), %r12
movq %r15, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_4
# %bb.3:
movq 24(%rsp), %rax
movq 8(%rsp), %rcx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movl $512, 20(%rsp) # imm = 0x200
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 20(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z13redunction_v1PiS_i, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_4:
callq hipDeviceSynchronize
xorps %xmm0, %xmm0
movups %xmm0, (%rbx)
movq 8(%rsp), %rsi
movl $16, %edx
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
xorl %eax, %eax
.p2align 4, 0x90
.LBB5_5: # =>This Inner Loop Header: Depth=1
addl (%rbx,%rax,4), %ebp
incq %rax
cmpq $4, %rax
jne .LBB5_5
# %bb.6:
xorl %r13d, %r13d
movl $.L.str, %edi
movl $130816, %esi # imm = 0x1FF00
movl %ebp, %edx
xorl %eax, %eax
callq printf
movq 24(%rsp), %rdi
movl $2048, %edx # imm = 0x800
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
movl $16, %edx
xorl %esi, %esi
callq hipMemset
movq %r15, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_8
# %bb.7:
movq 24(%rsp), %rax
movq 8(%rsp), %rcx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movl $512, 20(%rsp) # imm = 0x200
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 20(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z21reduction_v1_improvedPiS_i, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_8:
callq hipDeviceSynchronize
xorps %xmm0, %xmm0
movups %xmm0, (%rbx)
movq 8(%rsp), %rsi
movl $16, %edx
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
xorl %eax, %eax
.p2align 4, 0x90
.LBB5_9: # =>This Inner Loop Header: Depth=1
addl (%rbx,%rax,4), %r13d
incq %rax
cmpq $4, %rax
jne .LBB5_9
# %bb.10:
movl $.L.str, %edi
movl $130816, %esi # imm = 0x1FF00
movl %r13d, %edx
xorl %eax, %eax
callq printf
movq 8(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
callq hipDeviceReset
xorl %eax, %eax
addq $120, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end5:
.size main, .Lfunc_end5-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB6_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB6_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13redunction_v1PiS_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z21reduction_v1_improvedPiS_i, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end6:
.size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB7_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB7_2:
retq
.Lfunc_end7:
.size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "cpu_result: %d, gpu_result: %d\n"
.size .L.str, 32
.type _Z13redunction_v1PiS_i,@object # @_Z13redunction_v1PiS_i
.section .rodata,"a",@progbits
.globl _Z13redunction_v1PiS_i
.p2align 3, 0x0
_Z13redunction_v1PiS_i:
.quad _Z28__device_stub__redunction_v1PiS_i
.size _Z13redunction_v1PiS_i, 8
.type _Z21reduction_v1_improvedPiS_i,@object # @_Z21reduction_v1_improvedPiS_i
.globl _Z21reduction_v1_improvedPiS_i
.p2align 3, 0x0
_Z21reduction_v1_improvedPiS_i:
.quad _Z36__device_stub__reduction_v1_improvedPiS_i
.size _Z21reduction_v1_improvedPiS_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z13redunction_v1PiS_i"
.size .L__unnamed_1, 23
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z21reduction_v1_improvedPiS_i"
.size .L__unnamed_2, 31
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z28__device_stub__redunction_v1PiS_i
.addrsig_sym _Z36__device_stub__reduction_v1_improvedPiS_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z13redunction_v1PiS_i
.addrsig_sym _Z21reduction_v1_improvedPiS_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z21reduction_v1_improvedPiS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R8, SR_CTAID.X ; /* 0x0000000000087919 */
/* 0x000e280000002500 */
/*0020*/ S2R R9, SR_TID.X ; /* 0x0000000000097919 */
/* 0x000e620000002100 */
/*0030*/ IMAD R0, R8, c[0x0][0x0], RZ ; /* 0x0000000008007a24 */
/* 0x001fc800078e02ff */
/*0040*/ IMAD.IADD R6, R0, 0x1, R9 ; /* 0x0000000100067824 */
/* 0x002fca00078e0209 */
/*0050*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x170], PT ; /* 0x00005c0006007a0c */
/* 0x000fda0003f06270 */
/*0060*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0070*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */
/* 0x000fe40000000800 */
/*0080*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */
/* 0x000fe40008011604 */
/*0090*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fc80000000a00 */
/*00a0*/ ISETP.NE.AND P0, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */
/* 0x000fda000bf05270 */
/*00b0*/ @!P0 BRA 0x240 ; /* 0x0000018000008947 */
/* 0x000fea0003800000 */
/*00c0*/ IMAD.MOV.U32 R2, RZ, RZ, 0x1 ; /* 0x00000001ff027424 */
/* 0x000fc800078e00ff */
/*00d0*/ IMAD.SHL.U32 R14, R2, 0x2, RZ ; /* 0x00000002020e7824 */
/* 0x000fe200078e00ff */
/*00e0*/ BSSY B0, 0x200 ; /* 0x0000011000007945 */
/* 0x000fe60003800000 */
/*00f0*/ IMAD R11, R14, R9, RZ ; /* 0x000000090e0b7224 */
/* 0x000fca00078e02ff */
/*0100*/ ISETP.GE.U32.AND P0, PT, R11, c[0x0][0x0], PT ; /* 0x000000000b007a0c */
/* 0x000fda0003f06070 */
/*0110*/ @P0 BRA 0x1f0 ; /* 0x000000d000000947 */
/* 0x000fea0003800000 */
/*0120*/ IMAD.IADD R3, R11, 0x1, R2 ; /* 0x000000010b037824 */
/* 0x000fe200078e0202 */
/*0130*/ IADD3 R5, P0, R0, R11, RZ ; /* 0x0000000b00057210 */
/* 0x000fc80007f1e0ff */
/*0140*/ IADD3 R7, P1, R0, R3, RZ ; /* 0x0000000300077210 */
/* 0x000fe40007f3e0ff */
/*0150*/ LEA.HI.X.SX32 R10, R11, RZ, 0x1, P0 ; /* 0x000000ff0b0a7211 */
/* 0x000fe400000f0eff */
/*0160*/ LEA R2, P0, R5, c[0x0][0x160], 0x2 ; /* 0x0000580005027a11 */
/* 0x000fe400078010ff */
/*0170*/ LEA.HI.X.SX32 R12, R3, RZ, 0x1, P1 ; /* 0x000000ff030c7211 */
/* 0x000fe400008f0eff */
/*0180*/ LEA R4, P1, R7, c[0x0][0x160], 0x2 ; /* 0x0000580007047a11 */
/* 0x000fe400078210ff */
/*0190*/ LEA.HI.X R3, R5, c[0x0][0x164], R10, 0x2, P0 ; /* 0x0000590005037a11 */
/* 0x000fc400000f140a */
/*01a0*/ LEA.HI.X R5, R7, c[0x0][0x164], R12, 0x2, P1 ; /* 0x0000590007057a11 */
/* 0x000fc600008f140c */
/*01b0*/ LDG.E R7, [R2.64] ; /* 0x0000000602077981 */
/* 0x000ea8000c1e1900 */
/*01c0*/ LDG.E R4, [R4.64] ; /* 0x0000000604047981 */
/* 0x000ea4000c1e1900 */
/*01d0*/ IMAD.IADD R7, R7, 0x1, R4 ; /* 0x0000000107077824 */
/* 0x004fca00078e0204 */
/*01e0*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x0001e4000c101906 */
/*01f0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0200*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0210*/ ISETP.GT.U32.AND P0, PT, R14, UR4, PT ; /* 0x000000040e007c0c */
/* 0x000fe2000bf04070 */
/*0220*/ IMAD.MOV.U32 R2, RZ, RZ, R14 ; /* 0x000000ffff027224 */
/* 0x001fd800078e000e */
/*0230*/ @!P0 BRA 0xd0 ; /* 0xfffffe9000008947 */
/* 0x000fea000383ffff */
/*0240*/ ISETP.NE.AND P0, PT, R9, RZ, PT ; /* 0x000000ff0900720c */
/* 0x000fda0003f05270 */
/*0250*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0260*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */
/* 0x000fc800078e00ff */
/*0270*/ IMAD.WIDE R2, R6, R5, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x000fcc00078e0205 */
/*0280*/ LDG.E R3, [R2.64] ; /* 0x0000000602037981 */
/* 0x000ea2000c1e1900 */
/*0290*/ IMAD.WIDE.U32 R4, R8, R5, c[0x0][0x168] ; /* 0x00005a0008047625 */
/* 0x000fca00078e0005 */
/*02a0*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */
/* 0x004fe2000c101906 */
/*02b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*02c0*/ BRA 0x2c0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0300*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0310*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0320*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0330*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0340*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0350*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0360*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0370*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z13redunction_v1PiS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R2, R0, c[0x0][0x0], R5 ; /* 0x0000000000027a24 */
/* 0x001fca00078e0205 */
/*0040*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x170], PT ; /* 0x00005c0002007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */
/* 0x000fe20000000800 */
/*0070*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*0080*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */
/* 0x000fe40008011604 */
/*0090*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe20000000a00 */
/*00a0*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fc600078e0203 */
/*00b0*/ ISETP.NE.AND P0, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */
/* 0x000fda000bf05270 */
/*00c0*/ @!P0 BRA 0x340 ; /* 0x0000027000008947 */
/* 0x000fea0003800000 */
/*00d0*/ HFMA2.MMA R7, -RZ, RZ, 0, 5.9604644775390625e-08 ; /* 0x00000001ff077435 */
/* 0x000fe200000001ff */
/*00e0*/ IABS R4, R5 ; /* 0x0000000500047213 */
/* 0x000fd20000000000 */
/*00f0*/ IMAD.SHL.U32 R6, R7, 0x2, RZ ; /* 0x0000000207067824 */
/* 0x000fe200078e00ff */
/*0100*/ IABS R14, R5 ; /* 0x00000005000e7213 */
/* 0x000fe20000000000 */
/*0110*/ BSSY B0, 0x300 ; /* 0x000001e000007945 */
/* 0x000fe20003800000 */
/*0120*/ ISETP.GE.AND P2, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fe40003f46270 */
/*0130*/ IABS R12, R6.reuse ; /* 0x00000006000c7213 */
/* 0x080fe40000000000 */
/*0140*/ IABS R13, R6 ; /* 0x00000006000d7213 */
/* 0x000fe40000000000 */
/*0150*/ I2F.RP R10, R12 ; /* 0x0000000c000a7306 */
/* 0x000e300000209400 */
/*0160*/ MUFU.RCP R10, R10 ; /* 0x0000000a000a7308 */
/* 0x001e240000001000 */
/*0170*/ IADD3 R8, R10, 0xffffffe, RZ ; /* 0x0ffffffe0a087810 */
/* 0x001fe20007ffe0ff */
/*0180*/ IMAD.MOV R10, RZ, RZ, -R13 ; /* 0x000000ffff0a7224 */
/* 0x000fca00078e0a0d */
/*0190*/ F2I.FTZ.U32.TRUNC.NTZ R9, R8 ; /* 0x0000000800097305 */
/* 0x000064000021f000 */
/*01a0*/ MOV R8, RZ ; /* 0x000000ff00087202 */
/* 0x001fe20000000f00 */
/*01b0*/ IMAD.MOV R11, RZ, RZ, -R9 ; /* 0x000000ffff0b7224 */
/* 0x002fc800078e0a09 */
/*01c0*/ IMAD R11, R11, R12, RZ ; /* 0x0000000c0b0b7224 */
/* 0x000fc800078e02ff */
/*01d0*/ IMAD.HI.U32 R9, R9, R11, R8 ; /* 0x0000000b09097227 */
/* 0x000fc800078e0008 */
/*01e0*/ IMAD.MOV.U32 R11, RZ, RZ, R14 ; /* 0x000000ffff0b7224 */
/* 0x000fe400078e000e */
/*01f0*/ IMAD.HI.U32 R9, R9, R4, RZ ; /* 0x0000000409097227 */
/* 0x000fc800078e00ff */
/*0200*/ IMAD R9, R9, R10, R11 ; /* 0x0000000a09097224 */
/* 0x000fca00078e020b */
/*0210*/ ISETP.GT.U32.AND P0, PT, R12, R9, PT ; /* 0x000000090c00720c */
/* 0x000fda0003f04070 */
/*0220*/ @!P0 IADD3 R9, R9, -R12, RZ ; /* 0x8000000c09098210 */
/* 0x000fe40007ffe0ff */
/*0230*/ ISETP.NE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fe40003f05270 */
/*0240*/ ISETP.GT.U32.AND P1, PT, R12, R9, PT ; /* 0x000000090c00720c */
/* 0x000fda0003f24070 */
/*0250*/ @!P1 IMAD.IADD R9, R9, 0x1, -R12 ; /* 0x0000000109099824 */
/* 0x000fc800078e0a0c */
/*0260*/ @!P2 IMAD.MOV R9, RZ, RZ, -R9 ; /* 0x000000ffff09a224 */
/* 0x000fe200078e0a09 */
/*0270*/ @!P0 LOP3.LUT R9, RZ, R6, RZ, 0x33, !PT ; /* 0x00000006ff098212 */
/* 0x000fc800078e33ff */
/*0280*/ ISETP.NE.AND P0, PT, R9, RZ, PT ; /* 0x000000ff0900720c */
/* 0x000fda0003f05270 */
/*0290*/ @P0 BRA 0x2f0 ; /* 0x0000005000000947 */
/* 0x000fea0003800000 */
/*02a0*/ IMAD.WIDE R8, R7, 0x4, R2 ; /* 0x0000000407087825 */
/* 0x000fe400078e0202 */
/*02b0*/ LDG.E R7, [R2.64] ; /* 0x0000000602077981 */
/* 0x000ea8000c1e1900 */
/*02c0*/ LDG.E R8, [R8.64] ; /* 0x0000000608087981 */
/* 0x000ea4000c1e1900 */
/*02d0*/ IADD3 R7, R7, R8, RZ ; /* 0x0000000807077210 */
/* 0x004fca0007ffe0ff */
/*02e0*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x0001e4000c101906 */
/*02f0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0300*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0310*/ ISETP.GT.U32.AND P0, PT, R6, UR4, PT ; /* 0x0000000406007c0c */
/* 0x000fe2000bf04070 */
/*0320*/ IMAD.MOV.U32 R7, RZ, RZ, R6 ; /* 0x000000ffff077224 */
/* 0x001fd800078e0006 */
/*0330*/ @!P0 BRA 0xf0 ; /* 0xfffffdb000008947 */
/* 0x000fea000383ffff */
/*0340*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fda0003f05270 */
/*0350*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0360*/ LDG.E R3, [R2.64] ; /* 0x0000000602037981 */
/* 0x000ea2000c1e1900 */
/*0370*/ MOV R5, 0x4 ; /* 0x0000000400057802 */
/* 0x000fca0000000f00 */
/*0380*/ IMAD.WIDE.U32 R4, R0, R5, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fca00078e0005 */
/*0390*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */
/* 0x004fe2000c101906 */
/*03a0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*03b0*/ BRA 0x3b0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*03c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0400*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0410*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0420*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0430*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0440*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0450*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0460*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0470*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z13redunction_v1PiS_i
.globl _Z13redunction_v1PiS_i
.p2align 8
.type _Z13redunction_v1PiS_i,@function
_Z13redunction_v1PiS_i:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x24
s_load_b32 s4, s[0:1], 0x10
s_mov_b32 s2, s15
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s2, s3, v[0:1]
v_cmp_gt_i32_e32 vcc_lo, s4, v1
s_and_saveexec_b32 s4, vcc_lo
s_cbranch_execz .LBB0_8
s_load_b64 s[4:5], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_cmp_lt_u32 s3, 2
s_cbranch_scc1 .LBB0_6
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[3:4], 2, v[1:2]
s_mov_b32 s7, 1
s_lshr_b32 s3, s3, 1
s_waitcnt lgkmcnt(0)
v_add_co_u32 v3, vcc_lo, s4, v3
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_4
.p2align 6
.LBB0_3:
s_or_b32 exec_lo, exec_lo, s8
s_cmp_gt_u32 s6, s3
s_mov_b32 s7, s6
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
s_cbranch_scc1 .LBB0_6
.LBB0_4:
s_lshl_b32 s6, s7, 1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_add_i32 s8, s6, -1
v_and_b32_e32 v5, s8, v0
s_mov_b32 s8, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_eq_u32_e32 0, v5
s_cbranch_execz .LBB0_3
v_add_nc_u32_e32 v5, s7, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v6, 31, v5
v_lshlrev_b64 v[5:6], 2, v[5:6]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v5, vcc_lo, s4, v5
v_add_co_ci_u32_e32 v6, vcc_lo, s5, v6, vcc_lo
s_clause 0x1
global_load_b32 v5, v[5:6], off
global_load_b32 v6, v[3:4], off
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v5, v6, v5
global_store_b32 v[3:4], v5, off
s_branch .LBB0_3
.LBB0_6:
s_set_inst_prefetch_distance 0x2
v_cmp_eq_u32_e32 vcc_lo, 0, v0
s_mov_b32 s3, 0
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_8
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_load_b64 s[0:1], s[0:1], 0x8
s_lshl_b64 s[2:3], s[2:3], 2
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s4, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v1, vcc_lo
global_load_b32 v0, v[0:1], off
v_mov_b32_e32 v1, 0
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
s_waitcnt vmcnt(0)
global_store_b32 v1, v0, s[0:1]
.LBB0_8:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13redunction_v1PiS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z13redunction_v1PiS_i, .Lfunc_end0-_Z13redunction_v1PiS_i
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z21reduction_v1_improvedPiS_i
.globl _Z21reduction_v1_improvedPiS_i
.p2align 8
.type _Z21reduction_v1_improvedPiS_i,@function
_Z21reduction_v1_improvedPiS_i:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x24
s_load_b32 s4, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_mul_i32 s6, s15, s3
v_add_nc_u32_e32 v1, s6, v0
s_delay_alu instid0(VALU_DEP_1)
v_cmp_gt_i32_e32 vcc_lo, s4, v1
s_and_saveexec_b32 s4, vcc_lo
s_cbranch_execz .LBB1_8
s_load_b64 s[4:5], s[0:1], 0x0
s_mov_b32 s2, s15
s_cmp_lt_u32 s3, 2
s_cbranch_scc1 .LBB1_6
s_mov_b32 s7, 0
s_mov_b32 s11, 1
s_lshl_b64 s[8:9], s[6:7], 2
s_lshr_b32 s6, s3, 1
s_waitcnt lgkmcnt(0)
s_add_u32 s7, s4, s8
s_addc_u32 s8, s5, s9
s_set_inst_prefetch_distance 0x1
s_branch .LBB1_4
.p2align 6
.LBB1_3:
s_or_b32 exec_lo, exec_lo, s10
s_cmp_gt_u32 s9, s6
s_mov_b32 s11, s9
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
s_cbranch_scc1 .LBB1_6
.LBB1_4:
s_lshl_b32 s9, s11, 1
s_mov_b32 s10, exec_lo
v_mul_lo_u32 v2, s9, v0
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_u32_e64 s3, v2
s_cbranch_execz .LBB1_3
v_dual_mov_b32 v5, 0 :: v_dual_add_nc_u32 v4, s11, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mov_b32_e32 v3, v5
v_lshlrev_b64 v[4:5], 2, v[4:5]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[2:3], 2, v[2:3]
v_add_co_u32 v4, vcc_lo, s7, v4
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_ci_u32_e32 v5, vcc_lo, s8, v5, vcc_lo
v_add_co_u32 v2, vcc_lo, s7, v2
s_delay_alu instid0(VALU_DEP_4)
v_add_co_ci_u32_e32 v3, vcc_lo, s8, v3, vcc_lo
s_clause 0x1
global_load_b32 v4, v[4:5], off
global_load_b32 v5, v[2:3], off
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v4, v5, v4
global_store_b32 v[2:3], v4, off
s_branch .LBB1_3
.LBB1_6:
s_set_inst_prefetch_distance 0x2
v_cmp_eq_u32_e32 vcc_lo, 0, v0
s_mov_b32 s3, 0
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB1_8
v_ashrrev_i32_e32 v2, 31, v1
s_load_b64 s[0:1], s[0:1], 0x8
s_lshl_b64 s[2:3], s[2:3], 2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v1, vcc_lo
global_load_b32 v0, v[0:1], off
v_mov_b32_e32 v1, 0
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
s_waitcnt vmcnt(0)
global_store_b32 v1, v0, s[0:1]
.LBB1_8:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z21reduction_v1_improvedPiS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z21reduction_v1_improvedPiS_i, .Lfunc_end1-_Z21reduction_v1_improvedPiS_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13redunction_v1PiS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13redunction_v1PiS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z21reduction_v1_improvedPiS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z21reduction_v1_improvedPiS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00110448_00000000-6_reduction.cudafe1.cpp"
.text
#APP
#NO_APP
	# Module teardown helper: unregisters the embedded fat binary.
	# Registered with atexit() by _ZL24__sti____cudaRegisterAllv below.
	.type	_ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2063:
	.cfi_startproc
	endbr64
	subq	$8, %rsp			# keep stack 16-byte aligned at the call
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi	# handle saved at registration time
	call	__cudaUnregisterFatBinary@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2063:
	.size	_ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
	# initialize(int* input /*rdi*/, int size /*esi*/):
	# writes input[i] = i for 0 <= i < size; no-op when size <= 0.
	.globl _Z10initializePii
	.type _Z10initializePii, @function
_Z10initializePii:
.LFB2057:
	.cfi_startproc
	endbr64
	testl	%esi, %esi
	jle	.L3				# size <= 0: nothing to do
	movslq	%esi, %rsi			# sign-extend size for 64-bit compare
	movl	$0, %eax			# i = 0
.L5:
	movl	%eax, (%rdi,%rax,4)		# input[i] = i
	addq	$1, %rax
	cmpq	%rsi, %rax
	jne	.L5
.L3:
	ret
	.cfi_endproc
.LFE2057:
	.size	_Z10initializePii, .-_Z10initializePii
	# cpu_reduction(int* input /*rdi*/, int size /*esi*/):
	# returns sum of the loop index 0..size-1 (note: the input pointer in
	# %rdi is never dereferenced — this mirrors the C source, which adds
	# `index`, not `input[index]`).  Returns 0 when size <= 0.
	.globl _Z13cpu_reductionPii
	.type _Z13cpu_reductionPii, @function
_Z13cpu_reductionPii:
.LFB2058:
	.cfi_startproc
	endbr64
	testl	%esi, %esi
	jle	.L10				# size <= 0: result is 0
	movl	$0, %eax			# index = 0
	movl	$0, %edx			# result = 0
.L9:
	addl	%eax, %edx			# result += index
	addl	$1, %eax
	cmpl	%eax, %esi
	jne	.L9
.L7:
	movl	%edx, %eax			# return result
	ret
.L10:
	movl	$0, %edx
	jmp	.L7
	.cfi_endproc
.LFE2058:
	.size	_Z13cpu_reductionPii, .-_Z13cpu_reductionPii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "cpu_result: %d, gpu_result: %d\n"
.text
	# showResult(int cpu_result /*edi*/, int gpu_result /*esi*/):
	# forwards to __printf_chk(2 /*_FORTIFY flag*/, .LC0 fmt, cpu, gpu).
	.globl _Z10showResultii
	.type _Z10showResultii, @function
_Z10showResultii:
.LFB2059:
	.cfi_startproc
	endbr64
	subq	$8, %rsp			# stack alignment for the call
	.cfi_def_cfa_offset 16
	movl	%edi, %edx			# arg3 = cpu_result
	movl	%esi, %ecx			# arg4 = gpu_result
	leaq	.LC0(%rip), %rsi		# arg2 = format string
	movl	$2, %edi			# arg1 = fortify flag
	movl	$0, %eax			# no vector registers in varargs
	call	__printf_chk@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2059:
	.size	_Z10showResultii, .-_Z10showResultii
	# Host-side launch stub for kernel redunction_v1(int*, int*, int).
	# Spills the three kernel arguments to the stack, builds the
	# kernel-argument pointer array at 96(%rsp), pops the <<<...>>> launch
	# configuration, and (if the pop succeeds) calls cudaLaunchKernel.
	.globl _Z36__device_stub__Z13redunction_v1PiS_iPiS_i
	.type _Z36__device_stub__Z13redunction_v1PiS_iPiS_i, @function
_Z36__device_stub__Z13redunction_v1PiS_iPiS_i:
.LFB2085:
	.cfi_startproc
	endbr64
	subq	$136, %rsp
	.cfi_def_cfa_offset 144
	movq	%rdi, 24(%rsp)			# spill arg0: int* input
	movq	%rsi, 16(%rsp)			# spill arg1: int* output
	movl	%edx, 12(%rsp)			# spill arg2: int size
	movq	%fs:40, %rax			# stack-protector canary
	movq	%rax, 120(%rsp)
	xorl	%eax, %eax
	leaq	24(%rsp), %rax			# args[0] = &input
	movq	%rax, 96(%rsp)
	leaq	16(%rsp), %rax			# args[1] = &output
	movq	%rax, 104(%rsp)
	leaq	12(%rsp), %rax			# args[2] = &size
	movq	%rax, 112(%rsp)
	movl	$1, 48(%rsp)			# default grid/block dims = (1,1,1)
	movl	$1, 52(%rsp)
	movl	$1, 56(%rsp)
	movl	$1, 60(%rsp)
	movl	$1, 64(%rsp)
	movl	$1, 68(%rsp)
	leaq	40(%rsp), %rcx			# &stream
	leaq	32(%rsp), %rdx			# &sharedMem
	leaq	60(%rsp), %rsi			# &blockDim
	leaq	48(%rsp), %rdi			# &gridDim
	call	__cudaPopCallConfiguration@PLT
	testl	%eax, %eax
	je	.L18				# 0 => configuration available, launch
.L14:
	movq	120(%rsp), %rax			# canary check before returning
	subq	%fs:40, %rax
	jne	.L19
	addq	$136, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 8
	ret
.L18:
	.cfi_restore_state
	pushq	40(%rsp)			# stream (stack arg)
	.cfi_def_cfa_offset 152
	pushq	40(%rsp)			# sharedMem (stack arg)
	.cfi_def_cfa_offset 160
	leaq	112(%rsp), %r9			# kernel args array
	movq	76(%rsp), %rcx			# blockDim
	movl	84(%rsp), %r8d
	movq	64(%rsp), %rsi			# gridDim
	movl	72(%rsp), %edx
	leaq	_Z13redunction_v1PiS_i(%rip), %rdi	# kernel handle
	call	cudaLaunchKernel@PLT
	addq	$16, %rsp
	.cfi_def_cfa_offset 144
	jmp	.L14
.L19:
	call	__stack_chk_fail@PLT
	.cfi_endproc
.LFE2085:
	.size	_Z36__device_stub__Z13redunction_v1PiS_iPiS_i, .-_Z36__device_stub__Z13redunction_v1PiS_iPiS_i
	# Host-visible symbol for the kernel; its address doubles as the kernel
	# handle passed to cudaLaunchKernel.  Simply tail-delegates to the stub.
	.globl _Z13redunction_v1PiS_i
	.type _Z13redunction_v1PiS_i, @function
_Z13redunction_v1PiS_i:
.LFB2086:
	.cfi_startproc
	endbr64
	subq	$8, %rsp			# stack alignment for the call
	.cfi_def_cfa_offset 16
	call	_Z36__device_stub__Z13redunction_v1PiS_iPiS_i
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2086:
	.size	_Z13redunction_v1PiS_i, .-_Z13redunction_v1PiS_i
	# Host-side launch stub for kernel reduction_v1_improved(int*, int*, int).
	# Structurally identical to the redunction_v1 stub above: spill args,
	# build the argument-pointer array, pop the launch config, launch.
	.globl _Z44__device_stub__Z21reduction_v1_improvedPiS_iPiS_i
	.type _Z44__device_stub__Z21reduction_v1_improvedPiS_iPiS_i, @function
_Z44__device_stub__Z21reduction_v1_improvedPiS_iPiS_i:
.LFB2087:
	.cfi_startproc
	endbr64
	subq	$136, %rsp
	.cfi_def_cfa_offset 144
	movq	%rdi, 24(%rsp)			# spill arg0: int* input
	movq	%rsi, 16(%rsp)			# spill arg1: int* output
	movl	%edx, 12(%rsp)			# spill arg2: int size
	movq	%fs:40, %rax			# stack-protector canary
	movq	%rax, 120(%rsp)
	xorl	%eax, %eax
	leaq	24(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	16(%rsp), %rax
	movq	%rax, 104(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 112(%rsp)
	movl	$1, 48(%rsp)			# default grid/block dims = (1,1,1)
	movl	$1, 52(%rsp)
	movl	$1, 56(%rsp)
	movl	$1, 60(%rsp)
	movl	$1, 64(%rsp)
	movl	$1, 68(%rsp)
	leaq	40(%rsp), %rcx
	leaq	32(%rsp), %rdx
	leaq	60(%rsp), %rsi
	leaq	48(%rsp), %rdi
	call	__cudaPopCallConfiguration@PLT
	testl	%eax, %eax
	je	.L26				# configuration available => launch
.L22:
	movq	120(%rsp), %rax			# canary check before returning
	subq	%fs:40, %rax
	jne	.L27
	addq	$136, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 8
	ret
.L26:
	.cfi_restore_state
	pushq	40(%rsp)			# stream
	.cfi_def_cfa_offset 152
	pushq	40(%rsp)			# sharedMem
	.cfi_def_cfa_offset 160
	leaq	112(%rsp), %r9			# kernel args array
	movq	76(%rsp), %rcx			# blockDim
	movl	84(%rsp), %r8d
	movq	64(%rsp), %rsi			# gridDim
	movl	72(%rsp), %edx
	leaq	_Z21reduction_v1_improvedPiS_i(%rip), %rdi
	call	cudaLaunchKernel@PLT
	addq	$16, %rsp
	.cfi_def_cfa_offset 144
	jmp	.L22
.L27:
	call	__stack_chk_fail@PLT
	.cfi_endproc
.LFE2087:
	.size	_Z44__device_stub__Z21reduction_v1_improvedPiS_iPiS_i, .-_Z44__device_stub__Z21reduction_v1_improvedPiS_iPiS_i
	# Host-visible kernel handle for reduction_v1_improved; delegates to
	# the launch stub, exactly like the redunction_v1 wrapper above.
	.globl _Z21reduction_v1_improvedPiS_i
	.type _Z21reduction_v1_improvedPiS_i, @function
_Z21reduction_v1_improvedPiS_i:
.LFB2088:
	.cfi_startproc
	endbr64
	subq	$8, %rsp			# stack alignment for the call
	.cfi_def_cfa_offset 16
	call	_Z44__device_stub__Z21reduction_v1_improvedPiS_iPiS_i
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2088:
	.size	_Z21reduction_v1_improvedPiS_i, .-_Z21reduction_v1_improvedPiS_i
	# main(): allocates h_ref (16 B = 4 ints) and h_input (2048 B = 512
	# ints), initializes the input, then runs both reduction kernels with
	# the same sequence: H2D copy, memset output, push <<<4,128>>> config,
	# launch via stub, sync, D2H copy of the 4 partial sums, host-side add
	# of the partials, print.  Note: cpu_reduction(h_input, 512) was
	# constant-folded by the compiler to 130816 (= 511*512/2, the
	# immediate passed to _Z10showResultii); the empty countdown loop at
	# .L31 is the dead residue of that inlined call.
	.globl main
	.type main, @function
main:
.LFB2060:
	.cfi_startproc
	endbr64
	pushq	%rbp
	.cfi_def_cfa_offset 16
	.cfi_offset 6, -16
	pushq	%rbx
	.cfi_def_cfa_offset 24
	.cfi_offset 3, -24
	subq	$56, %rsp
	.cfi_def_cfa_offset 80
	movq	%fs:40, %rax			# stack-protector canary
	movq	%rax, 40(%rsp)
	xorl	%eax, %eax
	movl	$1, 20(%rsp)			# dim3 y/z components = 1
	movl	$1, 24(%rsp)
	movl	$1, 32(%rsp)
	movl	$1, 36(%rsp)
	movl	$16, %edi			# h_ref = malloc(grid.x * sizeof(int)) = 16
	call	malloc@PLT
	movq	%rax, %rbx			# rbx = h_ref
	movl	$2048, %edi			# h_input = malloc(512 * sizeof(int))
	call	malloc@PLT
	movq	%rax, %rbp			# rbp = h_input
	movl	$512, %esi
	movq	%rax, %rdi
	call	_Z10initializePii		# h_input[i] = i
	movl	$512, %eax
.L31:					# dead countdown loop: residue of inlined cpu_reduction
	subl	$1, %eax
	jne	.L31
	movq	%rsp, %rdi			# &d_input at 0(%rsp)
	movl	$2048, %esi
	call	cudaMalloc@PLT
	leaq	8(%rsp), %rdi			# &d_output at 8(%rsp)
	movl	$16, %esi
	call	cudaMalloc@PLT
	movl	$1, %ecx			# cudaMemcpyHostToDevice
	movl	$2048, %edx
	movq	%rbp, %rsi
	movq	(%rsp), %rdi
	call	cudaMemcpy@PLT
	movl	$16, %edx			# zero the 4-int output buffer
	movl	$0, %esi
	movq	8(%rsp), %rdi
	call	cudaMemset@PLT
	movl	$4, 28(%rsp)			# grid.x = 4
	movl	$128, 16(%rsp)			# block.x = 128
	movl	24(%rsp), %ecx
	movl	$0, %r9d			# stream = 0
	movl	$0, %r8d			# sharedMem = 0
	movq	16(%rsp), %rdx			# block dims
	movq	28(%rsp), %rdi			# grid dims
	movl	36(%rsp), %esi
	call	__cudaPushCallConfiguration@PLT
	testl	%eax, %eax
	je	.L37				# 0 => config pushed, do the launch
.L32:
	call	cudaDeviceSynchronize@PLT
	pxor	%xmm0, %xmm0			# zero h_ref before the copy-back
	movups	%xmm0, (%rbx)
	movl	$2, %ecx			# cudaMemcpyDeviceToHost
	movl	$16, %edx
	movq	8(%rsp), %rsi
	movq	%rbx, %rdi
	call	cudaMemcpy@PLT
	movl	4(%rbx), %esi			# gpu_result = sum of 4 partials
	addl	(%rbx), %esi
	addl	8(%rbx), %esi
	addl	12(%rbx), %esi
	movl	$130816, %edi			# cpu_result folded to 511*512/2
	call	_Z10showResultii
	movl	$1, %ecx			# re-copy input (kernel mutated it)
	movl	$2048, %edx
	movq	%rbp, %rsi
	movq	(%rsp), %rdi
	call	cudaMemcpy@PLT
	movl	$16, %edx			# re-zero the output buffer
	movl	$0, %esi
	movq	8(%rsp), %rdi
	call	cudaMemset@PLT
	movl	24(%rsp), %ecx			# push the same <<<4,128>>> config
	movl	$0, %r9d
	movl	$0, %r8d
	movq	16(%rsp), %rdx
	movq	28(%rsp), %rdi
	movl	36(%rsp), %esi
	call	__cudaPushCallConfiguration@PLT
	testl	%eax, %eax
	je	.L38
.L33:
	call	cudaDeviceSynchronize@PLT
	pxor	%xmm0, %xmm0
	movups	%xmm0, (%rbx)
	movl	$2, %ecx			# cudaMemcpyDeviceToHost
	movl	$16, %edx
	movq	8(%rsp), %rsi
	movq	%rbx, %rdi
	call	cudaMemcpy@PLT
	movl	4(%rbx), %esi			# gpu_result for the improved kernel
	addl	(%rbx), %esi
	addl	8(%rbx), %esi
	addl	12(%rbx), %esi
	movl	$130816, %edi
	call	_Z10showResultii
	movq	8(%rsp), %rdi			# cleanup: free device + host buffers
	call	cudaFree@PLT
	movq	(%rsp), %rdi
	call	cudaFree@PLT
	movq	%rbx, %rdi
	call	free@PLT
	movq	%rbp, %rdi
	call	free@PLT
	call	cudaDeviceReset@PLT
	movq	40(%rsp), %rax			# canary check
	subq	%fs:40, %rax
	jne	.L39
	movl	$0, %eax			# return 0
	addq	$56, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 24
	popq	%rbx
	.cfi_def_cfa_offset 16
	popq	%rbp
	.cfi_def_cfa_offset 8
	ret
.L37:
	.cfi_restore_state
	movl	$512, %edx			# redunction_v1<<<4,128>>>(d_input, d_output, 512)
	movq	8(%rsp), %rsi
	movq	(%rsp), %rdi
	call	_Z36__device_stub__Z13redunction_v1PiS_iPiS_i
	jmp	.L32
.L38:
	movl	$512, %edx			# reduction_v1_improved<<<4,128>>>(d_input, d_output, 512)
	movq	8(%rsp), %rsi
	movq	(%rsp), %rdi
	call	_Z44__device_stub__Z21reduction_v1_improvedPiS_iPiS_i
	jmp	.L33
.L39:
	call	__stack_chk_fail@PLT
	.cfi_endproc
.LFE2060:
	.size	main, .-main
.section .rodata.str1.8
.align 8
.LC1:
.string "_Z21reduction_v1_improvedPiS_i"
.section .rodata.str1.1,"aMS",@progbits,1
.LC2:
.string "_Z13redunction_v1PiS_i"
.text
	# Static initializer (run via .init_array): registers the fat binary
	# and both kernel entry points with the CUDA runtime, then arranges
	# unregistration at exit via atexit(_ZL26__cudaUnregisterBinaryUtilv).
	.type	_ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2090:
	.cfi_startproc
	endbr64
	pushq	%rbx
	.cfi_def_cfa_offset 16
	.cfi_offset 3, -16
	leaq	_ZL15__fatDeviceText(%rip), %rdi	# fat-binary wrapper descriptor
	call	__cudaRegisterFatBinary@PLT
	movq	%rax, %rbx
	movq	%rax, _ZL20__cudaFatCubinHandle(%rip)	# save handle for teardown
	pushq	$0				# four trailing NULL args of __cudaRegisterFunction
	.cfi_def_cfa_offset 24
	pushq	$0
	.cfi_def_cfa_offset 32
	pushq	$0
	.cfi_def_cfa_offset 40
	pushq	$0
	.cfi_def_cfa_offset 48
	movl	$0, %r9d
	movl	$-1, %r8d			# thread_limit = -1 (none)
	leaq	.LC1(%rip), %rdx		# device-side mangled name
	movq	%rdx, %rcx
	leaq	_Z21reduction_v1_improvedPiS_i(%rip), %rsi	# host handle
	movq	%rax, %rdi
	call	__cudaRegisterFunction@PLT
	addq	$32, %rsp
	.cfi_def_cfa_offset 16
	pushq	$0				# same pattern for the second kernel
	.cfi_def_cfa_offset 24
	pushq	$0
	.cfi_def_cfa_offset 32
	pushq	$0
	.cfi_def_cfa_offset 40
	pushq	$0
	.cfi_def_cfa_offset 48
	movl	$0, %r9d
	movl	$-1, %r8d
	leaq	.LC2(%rip), %rdx
	movq	%rdx, %rcx
	leaq	_Z13redunction_v1PiS_i(%rip), %rsi
	movq	%rbx, %rdi
	call	__cudaRegisterFunction@PLT
	addq	$32, %rsp
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaRegisterFatBinaryEnd@PLT
	leaq	_ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
	call	atexit@PLT
	popq	%rbx
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2090:
	.size	_ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "reduction.hip"
	# initialize(int* input /*rdi*/, int size /*esi*/):
	# writes input[i] = i for 0 <= i < size; no-op when size <= 0.
	.globl	_Z10initializePii               # -- Begin function _Z10initializePii
	.p2align	4, 0x90
	.type	_Z10initializePii,@function
_Z10initializePii:                      # @_Z10initializePii
	.cfi_startproc
# %bb.0:
	testl	%esi, %esi
	jle	.LBB0_3				# size <= 0: nothing to do
# %bb.1:                                # %.lr.ph.preheader
	movl	%esi, %eax			# zero-extended trip count
	xorl	%ecx, %ecx			# i = 0
	.p2align	4, 0x90
.LBB0_2:                                # %.lr.ph
                                        # =>This Inner Loop Header: Depth=1
	movl	%ecx, (%rdi,%rcx,4)		# input[i] = i
	incq	%rcx
	cmpq	%rcx, %rax
	jne	.LBB0_2
.LBB0_3:                                # %._crit_edge
	retq
.Lfunc_end0:
	.size	_Z10initializePii, .Lfunc_end0-_Z10initializePii
	.cfi_endproc
                                        # -- End function
	# cpu_reduction(int* input /*rdi*/, int size /*esi*/):
	# clang replaced the index-summing loop with the closed form
	# (size-1) + (size-1)*(size-2)/2 == size*(size-1)/2.  The input
	# pointer is never read (the C source sums `index`, not the array).
	.globl	_Z13cpu_reductionPii            # -- Begin function _Z13cpu_reductionPii
	.p2align	4, 0x90
	.type	_Z13cpu_reductionPii,@function
_Z13cpu_reductionPii:                   # @_Z13cpu_reductionPii
	.cfi_startproc
# %bb.0:
                                        # kill: def $esi killed $esi def $rsi
	testl	%esi, %esi
	jle	.LBB1_1				# size <= 0: return 0
# %bb.2:                                # %._crit_edge.loopexit
	leal	-1(%rsi), %eax			# size-1
	leal	-2(%rsi), %ecx			# size-2
	imulq	%rax, %rcx			# (size-1)*(size-2), 64-bit to avoid overflow
	shrq	%rcx				# /2
	leal	(%rsi,%rcx), %eax		# size + (size-1)(size-2)/2
	decl	%eax				# - 1  => size*(size-1)/2
	retq
.LBB1_1:
	xorl	%eax, %eax
	retq
.Lfunc_end1:
	.size	_Z13cpu_reductionPii, .Lfunc_end1-_Z13cpu_reductionPii
	.cfi_endproc
                                        # -- End function
	# showResult(int cpu_result /*edi*/, int gpu_result /*esi*/):
	# tail-calls printf(.L.str, cpu_result, gpu_result).
	.globl	_Z10showResultii                # -- Begin function _Z10showResultii
	.p2align	4, 0x90
	.type	_Z10showResultii,@function
_Z10showResultii:                       # @_Z10showResultii
	.cfi_startproc
# %bb.0:
	movl	%esi, %edx			# arg3 = gpu_result
	movl	%edi, %esi			# arg2 = cpu_result
	movl	$.L.str, %edi			# arg1 = format string
	xorl	%eax, %eax			# no vector registers in varargs
	jmp	printf                          # TAILCALL
.Lfunc_end2:
	.size	_Z10showResultii, .Lfunc_end2-_Z10showResultii
	.cfi_endproc
                                        # -- End function
	# HIP host-side launch stub for kernel redunction_v1(int*, int*, int).
	# Spills the three kernel arguments, builds the argument-pointer array
	# at 80(%rsp), pops the hipLaunchKernelGGL configuration, and calls
	# hipLaunchKernel with the kernel symbol as the function handle.
	.globl	_Z28__device_stub__redunction_v1PiS_i # -- Begin function _Z28__device_stub__redunction_v1PiS_i
	.p2align	4, 0x90
	.type	_Z28__device_stub__redunction_v1PiS_i,@function
_Z28__device_stub__redunction_v1PiS_i:  # @_Z28__device_stub__redunction_v1PiS_i
	.cfi_startproc
# %bb.0:
	subq	$104, %rsp
	.cfi_def_cfa_offset 112
	movq	%rdi, 72(%rsp)			# spill arg0: int* input
	movq	%rsi, 64(%rsp)			# spill arg1: int* output
	movl	%edx, 12(%rsp)			# spill arg2: int size
	leaq	72(%rsp), %rax			# args[0] = &input
	movq	%rax, 80(%rsp)
	leaq	64(%rsp), %rax			# args[1] = &output
	movq	%rax, 88(%rsp)
	leaq	12(%rsp), %rax			# args[2] = &size
	movq	%rax, 96(%rsp)
	leaq	48(%rsp), %rdi			# &gridDim
	leaq	32(%rsp), %rsi			# &blockDim
	leaq	24(%rsp), %rdx			# &sharedMem
	leaq	16(%rsp), %rcx			# &stream
	callq	__hipPopCallConfiguration
	movq	48(%rsp), %rsi			# gridDim
	movl	56(%rsp), %edx
	movq	32(%rsp), %rcx			# blockDim
	movl	40(%rsp), %r8d
	leaq	80(%rsp), %r9			# kernel args array
	movl	$_Z13redunction_v1PiS_i, %edi	# kernel handle
	pushq	16(%rsp)			# stream (stack arg)
	.cfi_adjust_cfa_offset 8
	pushq	32(%rsp)			# sharedMem (stack arg)
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$120, %rsp
	.cfi_adjust_cfa_offset -120
	retq
.Lfunc_end3:
	.size	_Z28__device_stub__redunction_v1PiS_i, .Lfunc_end3-_Z28__device_stub__redunction_v1PiS_i
	.cfi_endproc
                                        # -- End function
	# HIP host-side launch stub for reduction_v1_improved(int*, int*, int).
	# Identical structure to the redunction_v1 stub above; only the kernel
	# handle passed to hipLaunchKernel differs.
	.globl	_Z36__device_stub__reduction_v1_improvedPiS_i # -- Begin function _Z36__device_stub__reduction_v1_improvedPiS_i
	.p2align	4, 0x90
	.type	_Z36__device_stub__reduction_v1_improvedPiS_i,@function
_Z36__device_stub__reduction_v1_improvedPiS_i: # @_Z36__device_stub__reduction_v1_improvedPiS_i
	.cfi_startproc
# %bb.0:
	subq	$104, %rsp
	.cfi_def_cfa_offset 112
	movq	%rdi, 72(%rsp)			# spill arg0: int* input
	movq	%rsi, 64(%rsp)			# spill arg1: int* output
	movl	%edx, 12(%rsp)			# spill arg2: int size
	leaq	72(%rsp), %rax
	movq	%rax, 80(%rsp)
	leaq	64(%rsp), %rax
	movq	%rax, 88(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	48(%rsp), %rdi			# &gridDim
	leaq	32(%rsp), %rsi			# &blockDim
	leaq	24(%rsp), %rdx			# &sharedMem
	leaq	16(%rsp), %rcx			# &stream
	callq	__hipPopCallConfiguration
	movq	48(%rsp), %rsi
	movl	56(%rsp), %edx
	movq	32(%rsp), %rcx
	movl	40(%rsp), %r8d
	leaq	80(%rsp), %r9			# kernel args array
	movl	$_Z21reduction_v1_improvedPiS_i, %edi	# kernel handle
	pushq	16(%rsp)			# stream
	.cfi_adjust_cfa_offset 8
	pushq	32(%rsp)			# sharedMem
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$120, %rsp
	.cfi_adjust_cfa_offset -120
	retq
.Lfunc_end4:
	.size	_Z36__device_stub__reduction_v1_improvedPiS_i, .Lfunc_end4-_Z36__device_stub__reduction_v1_improvedPiS_i
	.cfi_endproc
                                        # -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $120, %rsp
.cfi_def_cfa_offset 176
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $16, %edi
callq malloc
movq %rax, %rbx
movl $2048, %edi # imm = 0x800
callq malloc
movq %rax, %r14
xorl %eax, %eax
.p2align 4, 0x90
.LBB5_1: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
movl %eax, (%r14,%rax,4)
incq %rax
cmpq $512, %rax # imm = 0x200
jne .LBB5_1
# %bb.2: # %_Z10initializePii.exit
movabsq $4294967300, %r15 # imm = 0x100000004
leaq 24(%rsp), %rdi
movl $2048, %esi # imm = 0x800
callq hipMalloc
leaq 8(%rsp), %rdi
movl $16, %esi
callq hipMalloc
movq 24(%rsp), %rdi
movl $2048, %edx # imm = 0x800
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
xorl %ebp, %ebp
movl $16, %edx
xorl %esi, %esi
callq hipMemset
leaq 124(%r15), %r12
movq %r15, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_4
# %bb.3:
movq 24(%rsp), %rax
movq 8(%rsp), %rcx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movl $512, 20(%rsp) # imm = 0x200
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 20(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z13redunction_v1PiS_i, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_4:
callq hipDeviceSynchronize
xorps %xmm0, %xmm0
movups %xmm0, (%rbx)
movq 8(%rsp), %rsi
movl $16, %edx
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
xorl %eax, %eax
.p2align 4, 0x90
.LBB5_5: # =>This Inner Loop Header: Depth=1
addl (%rbx,%rax,4), %ebp
incq %rax
cmpq $4, %rax
jne .LBB5_5
# %bb.6:
xorl %r13d, %r13d
movl $.L.str, %edi
movl $130816, %esi # imm = 0x1FF00
movl %ebp, %edx
xorl %eax, %eax
callq printf
movq 24(%rsp), %rdi
movl $2048, %edx # imm = 0x800
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
movl $16, %edx
xorl %esi, %esi
callq hipMemset
movq %r15, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_8
# %bb.7:
movq 24(%rsp), %rax
movq 8(%rsp), %rcx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movl $512, 20(%rsp) # imm = 0x200
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 20(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z21reduction_v1_improvedPiS_i, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_8:
callq hipDeviceSynchronize
xorps %xmm0, %xmm0
movups %xmm0, (%rbx)
movq 8(%rsp), %rsi
movl $16, %edx
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
xorl %eax, %eax
.p2align 4, 0x90
.LBB5_9: # =>This Inner Loop Header: Depth=1
addl (%rbx,%rax,4), %r13d
incq %rax
cmpq $4, %rax
jne .LBB5_9
# %bb.10:
movl $.L.str, %edi
movl $130816, %esi # imm = 0x1FF00
movl %r13d, %edx
xorl %eax, %eax
callq printf
movq 8(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
callq hipDeviceReset
xorl %eax, %eax
addq $120, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end5:
.size main, .Lfunc_end5-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB6_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB6_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13redunction_v1PiS_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z21reduction_v1_improvedPiS_i, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end6:
.size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB7_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB7_2:
retq
.Lfunc_end7:
.size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "cpu_result: %d, gpu_result: %d\n"
.size .L.str, 32
.type _Z13redunction_v1PiS_i,@object # @_Z13redunction_v1PiS_i
.section .rodata,"a",@progbits
.globl _Z13redunction_v1PiS_i
.p2align 3, 0x0
_Z13redunction_v1PiS_i:
.quad _Z28__device_stub__redunction_v1PiS_i
.size _Z13redunction_v1PiS_i, 8
.type _Z21reduction_v1_improvedPiS_i,@object # @_Z21reduction_v1_improvedPiS_i
.globl _Z21reduction_v1_improvedPiS_i
.p2align 3, 0x0
_Z21reduction_v1_improvedPiS_i:
.quad _Z36__device_stub__reduction_v1_improvedPiS_i
.size _Z21reduction_v1_improvedPiS_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z13redunction_v1PiS_i"
.size .L__unnamed_1, 23
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z21reduction_v1_improvedPiS_i"
.size .L__unnamed_2, 31
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z28__device_stub__redunction_v1PiS_i
.addrsig_sym _Z36__device_stub__reduction_v1_improvedPiS_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z13redunction_v1PiS_i
.addrsig_sym _Z21reduction_v1_improvedPiS_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #ifdef _WIN32
# define NOMINMAX
#endif
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <assert.h>
#include <thrust/reduce.h>
#include <thrust/device_ptr.h>
float* read_array(const char* filename, int len) {
float *x = (float*) malloc(len * sizeof(float));
FILE *fp = fopen(filename, "r");
for( int i=0; i<len; i++){
int r=fscanf(fp,"%f",&x[i]);
if(r == EOF){
rewind(fp);
}
x[i]-=5;
}
fclose(fp);
return x;
}
void computeSum( float* reference, float* idata, const unsigned int len)
{
reference[0] = 0;
double total_sum = 0;
unsigned int i;
for( i = 0; i < len; ++i)
{
total_sum += idata[i];
}
*reference = total_sum;
}
int main( int argc, char** argv)
{
if(argc != 2) {
fprintf(stderr, "usage: ./problem2 N\n");
exit(1);
}
int num_elements = atoi(argv[1]);
float* h_data=read_array("problem1.inp",num_elements);
float reference = 1.0f;
computeSum(&reference , h_data, num_elements);
//start inclusive timing
float time;
cudaEvent_t startIn,stopIn;
cudaEventCreate(&startIn);
cudaEventCreate(&stopIn);
cudaEventRecord(startIn, 0);
int size = num_elements*sizeof(float);
float *d_in;
assert(cudaSuccess == cudaMalloc((void**)&d_in, size));
//copy the memory to device
assert(cudaSuccess == cudaMemcpy(d_in, h_data, size, cudaMemcpyHostToDevice));
//set up the pointer
thrust::device_ptr<float> dev_ptr(d_in);
//perform the thrust reduction
double result = thrust::reduce(dev_ptr,dev_ptr + num_elements, (double) 0.0,thrust::plus<float>());
//stop inclusive timing
cudaEventRecord(stopIn, 0);
cudaEventSynchronize(stopIn);
cudaEventElapsedTime(&time, startIn, stopIn);
cudaEventDestroy(startIn);
cudaEventDestroy(stopIn);
// Run accuracy test
float epsilon = 0.3f;
unsigned int result_regtest = (abs(result - reference) <= epsilon);
if(!result_regtest) printf("Test failed device: %f host: %f\n",result,reference);
//print the outputs
printf("%d\n%f\n%f\n",num_elements, result, time);
//printf("%f\n", time);
// cleanup memory
cudaFree(d_in);
//cudaFree(d_out);
free( h_data);
return 0;
} | #ifdef _WIN32
# define NOMINMAX
#endif
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <assert.h>
#include <thrust/reduce.h>
#include <thrust/device_ptr.h>
float* read_array(const char* filename, int len) {
float *x = (float*) malloc(len * sizeof(float));
FILE *fp = fopen(filename, "r");
for( int i=0; i<len; i++){
int r=fscanf(fp,"%f",&x[i]);
if(r == EOF){
rewind(fp);
}
x[i]-=5;
}
fclose(fp);
return x;
}
void computeSum( float* reference, float* idata, const unsigned int len)
{
reference[0] = 0;
double total_sum = 0;
unsigned int i;
for( i = 0; i < len; ++i)
{
total_sum += idata[i];
}
*reference = total_sum;
}
int main( int argc, char** argv)
{
if(argc != 2) {
fprintf(stderr, "usage: ./problem2 N\n");
exit(1);
}
int num_elements = atoi(argv[1]);
float* h_data=read_array("problem1.inp",num_elements);
float reference = 1.0f;
computeSum(&reference , h_data, num_elements);
//start inclusive timing
float time;
hipEvent_t startIn,stopIn;
hipEventCreate(&startIn);
hipEventCreate(&stopIn);
hipEventRecord(startIn, 0);
int size = num_elements*sizeof(float);
float *d_in;
assert(hipSuccess == hipMalloc((void**)&d_in, size));
//copy the memory to device
assert(hipSuccess == hipMemcpy(d_in, h_data, size, hipMemcpyHostToDevice));
//set up the pointer
thrust::device_ptr<float> dev_ptr(d_in);
//perform the thrust reduction
double result = thrust::reduce(dev_ptr,dev_ptr + num_elements, (double) 0.0,thrust::plus<float>());
//stop inclusive timing
hipEventRecord(stopIn, 0);
hipEventSynchronize(stopIn);
hipEventElapsedTime(&time, startIn, stopIn);
hipEventDestroy(startIn);
hipEventDestroy(stopIn);
// Run accuracy test
float epsilon = 0.3f;
unsigned int result_regtest = (abs(result - reference) <= epsilon);
if(!result_regtest) printf("Test failed device: %f host: %f\n",result,reference);
//print the outputs
printf("%d\n%f\n%f\n",num_elements, result, time);
//printf("%f\n", time);
// cleanup memory
hipFree(d_in);
//cudaFree(d_out);
free( h_data);
return 0;
} |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define checkCudaErrors(x) printf("%s (%d)\n", cudaGetErrorString(x), __LINE__)
__global__ void kernel(double *a, double *b, double *c, int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
c[i] = a[i] + b[i];
// Possible correction: let one thread do more than one calculation.
// If there is less threads than elements to calculate, then some threads will
// do 2 calculations (instead of one).
if (i < N-640) {
c[i+640] = a[i+640] + b[i+640];
}
}
int main(int argc, char **argv)
{
int N = 1000;
int sz_in_bytes = N*sizeof(double);
double *h_a, *h_b, *h_c;
double *d_a, *d_b, *d_c;
h_a = (double*)malloc(sz_in_bytes);
h_b = (double*)malloc(sz_in_bytes);
h_c = (double*)malloc(sz_in_bytes);
// Initiate values on h_a and h_b
for(int i = 0 ; i < N ; i++) {
h_a[i] = 1./(1.+i);
h_b[i] = (i-1.)/(i+1.);
}
checkCudaErrors(cudaMalloc((void**)&d_a, sz_in_bytes));
// Correction:
// checkCudaErrors(cudaMalloc((void**)&d_b, 0));
checkCudaErrors(cudaMalloc((void**)&d_b, sz_in_bytes));
checkCudaErrors(cudaMalloc((void**)&d_c, sz_in_bytes));
checkCudaErrors(cudaMemcpy(d_a, h_a, sz_in_bytes, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_b, h_b, sz_in_bytes, cudaMemcpyHostToDevice));
// 640 threads au total.
// But we calculate 1000 values.
// => error.
// One correction is: use enough threads.
// dim3 dimBlock(64, 1, 1);
// dim3 dimGrid(10, 1, 1) => dim3 dimGrid(10, 1, 1);
// Another correction is:
// Let a thread make more than one calculation (see function kernel()).
dim3 dimBlock(64, 1, 1);
dim3 dimGrid(16, 1, 1);
kernel<<<dimGrid , dimBlock>>>(d_a, d_b, d_c, N);
checkCudaErrors(cudaMemcpy(h_c, d_c, sz_in_bytes, cudaMemcpyDeviceToHost));
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
// Verifying
double err = 0, norm = 0;
for(int i = 0 ; i < N ; i++) {
double err_loc = fabs(h_c[i] - (h_a[i]+h_b[i]));
err += err_loc;
norm += fabs(h_c[i]);
}
if (err/norm < 1.e-16) {
printf("SUCCESS (Relative error : %.3e)\n", err/norm);
} else {
printf("ERROR (Relative error : %.3e)\n", err/norm);
}
free(h_a);
free(h_b);
free(h_c);
return 0;
} | code for sm_80
Function : _Z6kernelPdS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R11, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff0b7435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0060*/ IMAD.WIDE R4, R0, R11, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fc800078e020b */
/*0070*/ IMAD.WIDE R2, R0, R11, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fe200078e020b */
/*0080*/ LDG.E.64 R6, [R4.64] ; /* 0x0000000604067981 */
/* 0x000ea8000c1e1b00 */
/*0090*/ LDG.E.64 R8, [R2.64] ; /* 0x0000000602087981 */
/* 0x000ea2000c1e1b00 */
/*00a0*/ ULDC UR4, c[0x0][0x178] ; /* 0x00005e0000047ab9 */
/* 0x000fe40000000800 */
/*00b0*/ UIADD3 UR4, UR4, -0x280, URZ ; /* 0xfffffd8004047890 */
/* 0x000fcc000fffe03f */
/*00c0*/ ISETP.GE.AND P0, PT, R0.reuse, UR4, PT ; /* 0x0000000400007c0c */
/* 0x040fe2000bf06270 */
/*00d0*/ DADD R6, R6, R8 ; /* 0x0000000006067229 */
/* 0x0040640000000008 */
/*00e0*/ IMAD.WIDE R8, R0, R11, c[0x0][0x170] ; /* 0x00005c0000087625 */
/* 0x001fca00078e020b */
/*00f0*/ STG.E.64 [R8.64], R6 ; /* 0x0000000608007986 */
/* 0x0021ea000c101b06 */
/*0100*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0110*/ LDG.E.64 R4, [R4.64+0x1400] ; /* 0x0014000604047981 */
/* 0x000ea8000c1e1b00 */
/*0120*/ LDG.E.64 R2, [R2.64+0x1400] ; /* 0x0014000602027981 */
/* 0x000ea4000c1e1b00 */
/*0130*/ DADD R6, R4, R2 ; /* 0x0000000004067229 */
/* 0x005e0e0000000002 */
/*0140*/ STG.E.64 [R8.64+0x1400], R6 ; /* 0x0014000608007986 */
/* 0x001fe2000c101b06 */
/*0150*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0160*/ BRA 0x160; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define checkCudaErrors(x) printf("%s (%d)\n", cudaGetErrorString(x), __LINE__)
__global__ void kernel(double *a, double *b, double *c, int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
c[i] = a[i] + b[i];
// Possible correction: let one thread do more than one calculation.
// If there is less threads than elements to calculate, then some threads will
// do 2 calculations (instead of one).
if (i < N-640) {
c[i+640] = a[i+640] + b[i+640];
}
}
int main(int argc, char **argv)
{
int N = 1000;
int sz_in_bytes = N*sizeof(double);
double *h_a, *h_b, *h_c;
double *d_a, *d_b, *d_c;
h_a = (double*)malloc(sz_in_bytes);
h_b = (double*)malloc(sz_in_bytes);
h_c = (double*)malloc(sz_in_bytes);
// Initiate values on h_a and h_b
for(int i = 0 ; i < N ; i++) {
h_a[i] = 1./(1.+i);
h_b[i] = (i-1.)/(i+1.);
}
checkCudaErrors(cudaMalloc((void**)&d_a, sz_in_bytes));
// Correction:
// checkCudaErrors(cudaMalloc((void**)&d_b, 0));
checkCudaErrors(cudaMalloc((void**)&d_b, sz_in_bytes));
checkCudaErrors(cudaMalloc((void**)&d_c, sz_in_bytes));
checkCudaErrors(cudaMemcpy(d_a, h_a, sz_in_bytes, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_b, h_b, sz_in_bytes, cudaMemcpyHostToDevice));
// 640 threads au total.
// But we calculate 1000 values.
// => error.
// One correction is: use enough threads.
// dim3 dimBlock(64, 1, 1);
// dim3 dimGrid(10, 1, 1) => dim3 dimGrid(10, 1, 1);
// Another correction is:
// Let a thread make more than one calculation (see function kernel()).
dim3 dimBlock(64, 1, 1);
dim3 dimGrid(16, 1, 1);
kernel<<<dimGrid , dimBlock>>>(d_a, d_b, d_c, N);
checkCudaErrors(cudaMemcpy(h_c, d_c, sz_in_bytes, cudaMemcpyDeviceToHost));
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
// Verifying
double err = 0, norm = 0;
for(int i = 0 ; i < N ; i++) {
double err_loc = fabs(h_c[i] - (h_a[i]+h_b[i]));
err += err_loc;
norm += fabs(h_c[i]);
}
if (err/norm < 1.e-16) {
printf("SUCCESS (Relative error : %.3e)\n", err/norm);
} else {
printf("ERROR (Relative error : %.3e)\n", err/norm);
}
free(h_a);
free(h_b);
free(h_c);
return 0;
} | .file "tmpxft_0009252c_00000000-6_err1.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z30__device_stub__Z6kernelPdS_S_iPdS_S_i
.type _Z30__device_stub__Z6kernelPdS_S_iPdS_S_i, @function
_Z30__device_stub__Z6kernelPdS_S_iPdS_S_i:
.LFB2082:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z6kernelPdS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z30__device_stub__Z6kernelPdS_S_iPdS_S_i, .-_Z30__device_stub__Z6kernelPdS_S_iPdS_S_i
.globl _Z6kernelPdS_S_i
.type _Z6kernelPdS_S_i, @function
_Z6kernelPdS_S_i:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z30__device_stub__Z6kernelPdS_S_iPdS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z6kernelPdS_S_i, .-_Z6kernelPdS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC2:
.string "%s (%d)\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC5:
.string "SUCCESS (Relative error : %.3e)\n"
.align 8
.LC6:
.string "ERROR (Relative error : %.3e)\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $72, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl $8000, %edi
call malloc@PLT
movq %rax, %rbp
movl $8000, %edi
call malloc@PLT
movq %rax, %rbx
movl $8000, %edi
call malloc@PLT
movq %rax, %r12
movl $0, %eax
movsd .LC1(%rip), %xmm1
.L12:
pxor %xmm0, %xmm0
cvtsi2sdl %eax, %xmm0
movapd %xmm0, %xmm2
addsd %xmm1, %xmm2
movapd %xmm1, %xmm3
divsd %xmm2, %xmm3
movsd %xmm3, 0(%rbp,%rax,8)
subsd %xmm1, %xmm0
divsd %xmm2, %xmm0
movsd %xmm0, (%rbx,%rax,8)
addq $1, %rax
cmpq $1000, %rax
jne .L12
leaq 8(%rsp), %rdi
movl $8000, %esi
call cudaMalloc@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
movl $42, %ecx
leaq .LC2(%rip), %r13
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 16(%rsp), %rdi
movl $8000, %esi
call cudaMalloc@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
movl $45, %ecx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 24(%rsp), %rdi
movl $8000, %esi
call cudaMalloc@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
movl $46, %ecx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %ecx
movl $8000, %edx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
movl $48, %ecx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %ecx
movl $8000, %edx
movq %rbx, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
movl $49, %ecx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $64, 32(%rsp)
movl $1, 36(%rsp)
movl $16, 44(%rsp)
movl $1, 48(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 32(%rsp), %rdx
movl $1, %ecx
movq 44(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L24
.L13:
movl $2, %ecx
movl $8000, %edx
movq 24(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
movl $64, %ecx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movl $0, %eax
pxor %xmm4, %xmm4
movapd %xmm4, %xmm0
movq .LC3(%rip), %xmm5
.L14:
movsd (%r12,%rax), %xmm1
movsd 0(%rbp,%rax), %xmm3
addsd (%rbx,%rax), %xmm3
movapd %xmm1, %xmm2
subsd %xmm3, %xmm2
andpd %xmm5, %xmm2
addsd %xmm2, %xmm0
andpd %xmm5, %xmm1
addsd %xmm1, %xmm4
addq $8, %rax
cmpq $8000, %rax
jne .L14
divsd %xmm4, %xmm0
movsd .LC4(%rip), %xmm1
comisd %xmm0, %xmm1
jbe .L22
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
.L17:
movq %rbp, %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L25
movl $0, %eax
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L24:
.cfi_restore_state
movl $1000, %ecx
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z30__device_stub__Z6kernelPdS_S_iPdS_S_i
jmp .L13
.L22:
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
jmp .L17
.L25:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC7:
.string "_Z6kernelPdS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq _Z6kernelPdS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC1:
.long 0
.long 1072693248
.section .rodata.cst16,"aM",@progbits,16
.align 16
.LC3:
.long -1
.long 2147483647
.long 0
.long 0
.section .rodata.cst8
.align 8
.LC4:
.long -1747416644
.long 1016910514
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define checkCudaErrors(x) printf("%s (%d)\n", cudaGetErrorString(x), __LINE__)
__global__ void kernel(double *a, double *b, double *c, int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
c[i] = a[i] + b[i];
// Possible correction: let one thread do more than one calculation.
// If there is less threads than elements to calculate, then some threads will
// do 2 calculations (instead of one).
if (i < N-640) {
c[i+640] = a[i+640] + b[i+640];
}
}
int main(int argc, char **argv)
{
int N = 1000;
int sz_in_bytes = N*sizeof(double);
double *h_a, *h_b, *h_c;
double *d_a, *d_b, *d_c;
h_a = (double*)malloc(sz_in_bytes);
h_b = (double*)malloc(sz_in_bytes);
h_c = (double*)malloc(sz_in_bytes);
// Initiate values on h_a and h_b
for(int i = 0 ; i < N ; i++) {
h_a[i] = 1./(1.+i);
h_b[i] = (i-1.)/(i+1.);
}
checkCudaErrors(cudaMalloc((void**)&d_a, sz_in_bytes));
// Correction:
// checkCudaErrors(cudaMalloc((void**)&d_b, 0));
checkCudaErrors(cudaMalloc((void**)&d_b, sz_in_bytes));
checkCudaErrors(cudaMalloc((void**)&d_c, sz_in_bytes));
checkCudaErrors(cudaMemcpy(d_a, h_a, sz_in_bytes, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_b, h_b, sz_in_bytes, cudaMemcpyHostToDevice));
// 640 threads au total.
// But we calculate 1000 values.
// => error.
// One correction is: use enough threads.
// dim3 dimBlock(64, 1, 1);
// dim3 dimGrid(10, 1, 1) => dim3 dimGrid(10, 1, 1);
// Another correction is:
// Let a thread make more than one calculation (see function kernel()).
dim3 dimBlock(64, 1, 1);
dim3 dimGrid(16, 1, 1);
kernel<<<dimGrid , dimBlock>>>(d_a, d_b, d_c, N);
checkCudaErrors(cudaMemcpy(h_c, d_c, sz_in_bytes, cudaMemcpyDeviceToHost));
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
// Verifying
double err = 0, norm = 0;
for(int i = 0 ; i < N ; i++) {
double err_loc = fabs(h_c[i] - (h_a[i]+h_b[i]));
err += err_loc;
norm += fabs(h_c[i]);
}
if (err/norm < 1.e-16) {
printf("SUCCESS (Relative error : %.3e)\n", err/norm);
} else {
printf("ERROR (Relative error : %.3e)\n", err/norm);
}
free(h_a);
free(h_b);
free(h_c);
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define checkCudaErrors(x) printf("%s (%d)\n", hipGetErrorString(x), __LINE__)
__global__ void kernel(double *a, double *b, double *c, int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
c[i] = a[i] + b[i];
// Possible correction: let one thread do more than one calculation.
// If there is less threads than elements to calculate, then some threads will
// do 2 calculations (instead of one).
if (i < N-640) {
c[i+640] = a[i+640] + b[i+640];
}
}
int main(int argc, char **argv)
{
int N = 1000;
int sz_in_bytes = N*sizeof(double);
double *h_a, *h_b, *h_c;
double *d_a, *d_b, *d_c;
h_a = (double*)malloc(sz_in_bytes);
h_b = (double*)malloc(sz_in_bytes);
h_c = (double*)malloc(sz_in_bytes);
// Initiate values on h_a and h_b
for(int i = 0 ; i < N ; i++) {
h_a[i] = 1./(1.+i);
h_b[i] = (i-1.)/(i+1.);
}
checkCudaErrors(hipMalloc((void**)&d_a, sz_in_bytes));
// Correction:
// checkCudaErrors(cudaMalloc((void**)&d_b, 0));
checkCudaErrors(hipMalloc((void**)&d_b, sz_in_bytes));
checkCudaErrors(hipMalloc((void**)&d_c, sz_in_bytes));
checkCudaErrors(hipMemcpy(d_a, h_a, sz_in_bytes, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_b, h_b, sz_in_bytes, hipMemcpyHostToDevice));
// 640 threads au total.
// But we calculate 1000 values.
// => error.
// One correction is: use enough threads.
// dim3 dimBlock(64, 1, 1);
// dim3 dimGrid(10, 1, 1) => dim3 dimGrid(10, 1, 1);
// Another correction is:
// Let a thread make more than one calculation (see function kernel()).
dim3 dimBlock(64, 1, 1);
dim3 dimGrid(16, 1, 1);
kernel<<<dimGrid , dimBlock>>>(d_a, d_b, d_c, N);
checkCudaErrors(hipMemcpy(h_c, d_c, sz_in_bytes, hipMemcpyDeviceToHost));
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
// Verifying
double err = 0, norm = 0;
for(int i = 0 ; i < N ; i++) {
double err_loc = fabs(h_c[i] - (h_a[i]+h_b[i]));
err += err_loc;
norm += fabs(h_c[i]);
}
if (err/norm < 1.e-16) {
printf("SUCCESS (Relative error : %.3e)\n", err/norm);
} else {
printf("ERROR (Relative error : %.3e)\n", err/norm);
}
free(h_a);
free(h_b);
free(h_c);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define checkCudaErrors(x) printf("%s (%d)\n", hipGetErrorString(x), __LINE__)
__global__ void kernel(double *a, double *b, double *c, int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
c[i] = a[i] + b[i];
// Possible correction: let one thread do more than one calculation.
// If there is less threads than elements to calculate, then some threads will
// do 2 calculations (instead of one).
if (i < N-640) {
c[i+640] = a[i+640] + b[i+640];
}
}
int main(int argc, char **argv)
{
int N = 1000;
int sz_in_bytes = N*sizeof(double);
double *h_a, *h_b, *h_c;
double *d_a, *d_b, *d_c;
h_a = (double*)malloc(sz_in_bytes);
h_b = (double*)malloc(sz_in_bytes);
h_c = (double*)malloc(sz_in_bytes);
// Initiate values on h_a and h_b
for(int i = 0 ; i < N ; i++) {
h_a[i] = 1./(1.+i);
h_b[i] = (i-1.)/(i+1.);
}
checkCudaErrors(hipMalloc((void**)&d_a, sz_in_bytes));
// Correction:
// checkCudaErrors(cudaMalloc((void**)&d_b, 0));
checkCudaErrors(hipMalloc((void**)&d_b, sz_in_bytes));
checkCudaErrors(hipMalloc((void**)&d_c, sz_in_bytes));
checkCudaErrors(hipMemcpy(d_a, h_a, sz_in_bytes, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_b, h_b, sz_in_bytes, hipMemcpyHostToDevice));
// 640 threads au total.
// But we calculate 1000 values.
// => error.
// One correction is: use enough threads.
// dim3 dimBlock(64, 1, 1);
// dim3 dimGrid(10, 1, 1) => dim3 dimGrid(10, 1, 1);
// Another correction is:
// Let a thread make more than one calculation (see function kernel()).
dim3 dimBlock(64, 1, 1);
dim3 dimGrid(16, 1, 1);
kernel<<<dimGrid , dimBlock>>>(d_a, d_b, d_c, N);
checkCudaErrors(hipMemcpy(h_c, d_c, sz_in_bytes, hipMemcpyDeviceToHost));
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
// Verifying
double err = 0, norm = 0;
for(int i = 0 ; i < N ; i++) {
double err_loc = fabs(h_c[i] - (h_a[i]+h_b[i]));
err += err_loc;
norm += fabs(h_c[i]);
}
if (err/norm < 1.e-16) {
printf("SUCCESS (Relative error : %.3e)\n", err/norm);
} else {
printf("ERROR (Relative error : %.3e)\n", err/norm);
}
free(h_a);
free(h_b);
free(h_c);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6kernelPdS_S_i
.globl _Z6kernelPdS_S_i
.p2align 8
.type _Z6kernelPdS_S_i,@function
_Z6kernelPdS_S_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b128 s[4:7], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[2:3], null, s15, s2, v[0:1]
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x10
s_load_b32 s0, s[0:1], 0x18
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 3, v[2:3]
v_add_co_u32 v3, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v5, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v6, vcc_lo, s7, v1, vcc_lo
s_waitcnt lgkmcnt(0)
s_addk_i32 s0, 0xfd80
global_load_b64 v[3:4], v[3:4], off
global_load_b64 v[5:6], v[5:6], off
s_waitcnt vmcnt(0)
v_add_f64 v[3:4], v[3:4], v[5:6]
v_add_co_u32 v5, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v6, vcc_lo, s3, v1, vcc_lo
v_cmp_gt_i32_e32 vcc_lo, s0, v2
global_store_b64 v[5:6], v[3:4], off
s_and_saveexec_b32 s0, vcc_lo
s_cbranch_execz .LBB0_2
v_add_co_u32 v4, vcc_lo, v0, 0x1400
v_add_co_ci_u32_e32 v5, vcc_lo, 0, v1, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s4, v4
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v5, vcc_lo
v_add_co_u32 v2, vcc_lo, s6, v4
v_add_co_ci_u32_e32 v3, vcc_lo, s7, v5, vcc_lo
global_load_b64 v[0:1], v[0:1], off
global_load_b64 v[2:3], v[2:3], off
s_waitcnt vmcnt(0)
v_add_f64 v[0:1], v[0:1], v[2:3]
v_add_co_u32 v2, vcc_lo, s2, v4
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v5, vcc_lo
global_store_b64 v[2:3], v[0:1], off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6kernelPdS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6kernelPdS_S_i, .Lfunc_end0-_Z6kernelPdS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6kernelPdS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z6kernelPdS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define checkCudaErrors(x) printf("%s (%d)\n", hipGetErrorString(x), __LINE__)
__global__ void kernel(double *a, double *b, double *c, int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
c[i] = a[i] + b[i];
// Possible correction: let one thread do more than one calculation.
// If there is less threads than elements to calculate, then some threads will
// do 2 calculations (instead of one).
if (i < N-640) {
c[i+640] = a[i+640] + b[i+640];
}
}
int main(int argc, char **argv)
{
int N = 1000;
int sz_in_bytes = N*sizeof(double);
double *h_a, *h_b, *h_c;
double *d_a, *d_b, *d_c;
h_a = (double*)malloc(sz_in_bytes);
h_b = (double*)malloc(sz_in_bytes);
h_c = (double*)malloc(sz_in_bytes);
// Initiate values on h_a and h_b
for(int i = 0 ; i < N ; i++) {
h_a[i] = 1./(1.+i);
h_b[i] = (i-1.)/(i+1.);
}
checkCudaErrors(hipMalloc((void**)&d_a, sz_in_bytes));
// Correction:
// checkCudaErrors(cudaMalloc((void**)&d_b, 0));
checkCudaErrors(hipMalloc((void**)&d_b, sz_in_bytes));
checkCudaErrors(hipMalloc((void**)&d_c, sz_in_bytes));
checkCudaErrors(hipMemcpy(d_a, h_a, sz_in_bytes, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_b, h_b, sz_in_bytes, hipMemcpyHostToDevice));
// 640 threads au total.
// But we calculate 1000 values.
// => error.
// One correction is: use enough threads.
// dim3 dimBlock(64, 1, 1);
// dim3 dimGrid(10, 1, 1) => dim3 dimGrid(10, 1, 1);
// Another correction is:
// Let a thread make more than one calculation (see function kernel()).
dim3 dimBlock(64, 1, 1);
dim3 dimGrid(16, 1, 1);
kernel<<<dimGrid , dimBlock>>>(d_a, d_b, d_c, N);
checkCudaErrors(hipMemcpy(h_c, d_c, sz_in_bytes, hipMemcpyDeviceToHost));
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
// Verifying
double err = 0, norm = 0;
for(int i = 0 ; i < N ; i++) {
double err_loc = fabs(h_c[i] - (h_a[i]+h_b[i]));
err += err_loc;
norm += fabs(h_c[i]);
}
if (err/norm < 1.e-16) {
printf("SUCCESS (Relative error : %.3e)\n", err/norm);
} else {
printf("ERROR (Relative error : %.3e)\n", err/norm);
}
free(h_a);
free(h_b);
free(h_c);
return 0;
} | .text
.file "err1.hip"
.globl _Z21__device_stub__kernelPdS_S_i # -- Begin function _Z21__device_stub__kernelPdS_S_i
.p2align 4, 0x90
.type _Z21__device_stub__kernelPdS_S_i,@function
_Z21__device_stub__kernelPdS_S_i: # @_Z21__device_stub__kernelPdS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6kernelPdS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z21__device_stub__kernelPdS_S_i, .Lfunc_end0-_Z21__device_stub__kernelPdS_S_i
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI1_0:
.quad 0x3ff0000000000000 # double 1
.LCPI1_1:
.quad 0xbff0000000000000 # double -1
.LCPI1_3:
.quad 0x3c9cd2b297d889bc # double 9.9999999999999997E-17
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0
.LCPI1_2:
.quad 0x7fffffffffffffff # double NaN
.quad 0x7fffffffffffffff # double NaN
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $152, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $8000, %edi # imm = 0x1F40
callq malloc
movq %rax, %rbx
movl $8000, %edi # imm = 0x1F40
callq malloc
movq %rax, %r14
movl $8000, %edi # imm = 0x1F40
callq malloc
movq %rax, %r15
xorl %eax, %eax
movsd .LCPI1_0(%rip), %xmm0 # xmm0 = mem[0],zero
movsd .LCPI1_1(%rip), %xmm1 # xmm1 = mem[0],zero
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
xorps %xmm2, %xmm2
cvtsi2sd %eax, %xmm2
movapd %xmm2, %xmm3
addsd %xmm0, %xmm3
movapd %xmm0, %xmm4
divsd %xmm3, %xmm4
movsd %xmm4, (%rbx,%rax,8)
addsd %xmm1, %xmm2
divsd %xmm3, %xmm2
movsd %xmm2, (%r14,%rax,8)
incq %rax
cmpq $1000, %rax # imm = 0x3E8
jne .LBB1_1
# %bb.2:
leaq 24(%rsp), %rdi
movl $8000, %esi # imm = 0x1F40
callq hipMalloc
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
movl $44, %edx
xorl %eax, %eax
callq printf
leaq 16(%rsp), %rdi
movl $8000, %esi # imm = 0x1F40
callq hipMalloc
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
movl $47, %edx
xorl %eax, %eax
callq printf
leaq 8(%rsp), %rdi
movl $8000, %esi # imm = 0x1F40
callq hipMalloc
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
movl $48, %edx
xorl %eax, %eax
callq printf
movq 24(%rsp), %rdi
movl $8000, %edx # imm = 0x1F40
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
movl $50, %edx
xorl %eax, %eax
callq printf
movq 16(%rsp), %rdi
movl $8000, %edx # imm = 0x1F40
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
movl $51, %edx
xorl %eax, %eax
callq printf
movabsq $4294967312, %rdi # imm = 0x100000010
leaq 48(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 24(%rsp), %rax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl $1000, 36(%rsp) # imm = 0x3E8
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 36(%rsp), %rax
movq %rax, 136(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z6kernelPdS_S_i, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
movq 8(%rsp), %rsi
movl $8000, %edx # imm = 0x1F40
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
movl %eax, %edi
callq hipGetErrorString
xorl %r12d, %r12d
movl $.L.str, %edi
movq %rax, %rsi
movl $66, %edx
xorl %eax, %eax
callq printf
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
xorpd %xmm1, %xmm1
movapd .LCPI1_2(%rip), %xmm2 # xmm2 = [NaN,NaN]
xorpd %xmm0, %xmm0
.p2align 4, 0x90
.LBB1_5: # =>This Inner Loop Header: Depth=1
movsd (%r15,%r12,8), %xmm3 # xmm3 = mem[0],zero
movsd (%rbx,%r12,8), %xmm4 # xmm4 = mem[0],zero
addsd (%r14,%r12,8), %xmm4
movapd %xmm3, %xmm5
subsd %xmm4, %xmm5
andpd %xmm2, %xmm5
addsd %xmm5, %xmm0
andpd %xmm2, %xmm3
addsd %xmm3, %xmm1
incq %r12
cmpq $1000, %r12 # imm = 0x3E8
jne .LBB1_5
# %bb.6:
divsd %xmm1, %xmm0
movsd .LCPI1_3(%rip), %xmm1 # xmm1 = mem[0],zero
ucomisd %xmm0, %xmm1
movl $.L.str.1, %eax
movl $.L.str.2, %edi
cmovaq %rax, %rdi
movb $1, %al
callq printf
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
xorl %eax, %eax
addq $152, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6kernelPdS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6kernelPdS_S_i,@object # @_Z6kernelPdS_S_i
.section .rodata,"a",@progbits
.globl _Z6kernelPdS_S_i
.p2align 3, 0x0
_Z6kernelPdS_S_i:
.quad _Z21__device_stub__kernelPdS_S_i
.size _Z6kernelPdS_S_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%s (%d)\n"
.size .L.str, 9
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "SUCCESS (Relative error : %.3e)\n"
.size .L.str.1, 33
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "ERROR (Relative error : %.3e)\n"
.size .L.str.2, 31
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z6kernelPdS_S_i"
.size .L__unnamed_1, 17
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__kernelPdS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6kernelPdS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z6kernelPdS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R11, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff0b7435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0060*/ IMAD.WIDE R4, R0, R11, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fc800078e020b */
/*0070*/ IMAD.WIDE R2, R0, R11, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fe200078e020b */
/*0080*/ LDG.E.64 R6, [R4.64] ; /* 0x0000000604067981 */
/* 0x000ea8000c1e1b00 */
/*0090*/ LDG.E.64 R8, [R2.64] ; /* 0x0000000602087981 */
/* 0x000ea2000c1e1b00 */
/*00a0*/ ULDC UR4, c[0x0][0x178] ; /* 0x00005e0000047ab9 */
/* 0x000fe40000000800 */
/*00b0*/ UIADD3 UR4, UR4, -0x280, URZ ; /* 0xfffffd8004047890 */
/* 0x000fcc000fffe03f */
/*00c0*/ ISETP.GE.AND P0, PT, R0.reuse, UR4, PT ; /* 0x0000000400007c0c */
/* 0x040fe2000bf06270 */
/*00d0*/ DADD R6, R6, R8 ; /* 0x0000000006067229 */
/* 0x0040640000000008 */
/*00e0*/ IMAD.WIDE R8, R0, R11, c[0x0][0x170] ; /* 0x00005c0000087625 */
/* 0x001fca00078e020b */
/*00f0*/ STG.E.64 [R8.64], R6 ; /* 0x0000000608007986 */
/* 0x0021ea000c101b06 */
/*0100*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0110*/ LDG.E.64 R4, [R4.64+0x1400] ; /* 0x0014000604047981 */
/* 0x000ea8000c1e1b00 */
/*0120*/ LDG.E.64 R2, [R2.64+0x1400] ; /* 0x0014000602027981 */
/* 0x000ea4000c1e1b00 */
/*0130*/ DADD R6, R4, R2 ; /* 0x0000000004067229 */
/* 0x005e0e0000000002 */
/*0140*/ STG.E.64 [R8.64+0x1400], R6 ; /* 0x0014000608007986 */
/* 0x001fe2000c101b06 */
/*0150*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0160*/ BRA 0x160; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6kernelPdS_S_i
.globl _Z6kernelPdS_S_i
.p2align 8
.type _Z6kernelPdS_S_i,@function
_Z6kernelPdS_S_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b128 s[4:7], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[2:3], null, s15, s2, v[0:1]
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x10
s_load_b32 s0, s[0:1], 0x18
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 3, v[2:3]
v_add_co_u32 v3, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v5, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v6, vcc_lo, s7, v1, vcc_lo
s_waitcnt lgkmcnt(0)
s_addk_i32 s0, 0xfd80
global_load_b64 v[3:4], v[3:4], off
global_load_b64 v[5:6], v[5:6], off
s_waitcnt vmcnt(0)
v_add_f64 v[3:4], v[3:4], v[5:6]
v_add_co_u32 v5, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v6, vcc_lo, s3, v1, vcc_lo
v_cmp_gt_i32_e32 vcc_lo, s0, v2
global_store_b64 v[5:6], v[3:4], off
s_and_saveexec_b32 s0, vcc_lo
s_cbranch_execz .LBB0_2
v_add_co_u32 v4, vcc_lo, v0, 0x1400
v_add_co_ci_u32_e32 v5, vcc_lo, 0, v1, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s4, v4
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v5, vcc_lo
v_add_co_u32 v2, vcc_lo, s6, v4
v_add_co_ci_u32_e32 v3, vcc_lo, s7, v5, vcc_lo
global_load_b64 v[0:1], v[0:1], off
global_load_b64 v[2:3], v[2:3], off
s_waitcnt vmcnt(0)
v_add_f64 v[0:1], v[0:1], v[2:3]
v_add_co_u32 v2, vcc_lo, s2, v4
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v5, vcc_lo
global_store_b64 v[2:3], v[0:1], off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6kernelPdS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6kernelPdS_S_i, .Lfunc_end0-_Z6kernelPdS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6kernelPdS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z6kernelPdS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0009252c_00000000-6_err1.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z30__device_stub__Z6kernelPdS_S_iPdS_S_i
.type _Z30__device_stub__Z6kernelPdS_S_iPdS_S_i, @function
_Z30__device_stub__Z6kernelPdS_S_iPdS_S_i:
.LFB2082:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z6kernelPdS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z30__device_stub__Z6kernelPdS_S_iPdS_S_i, .-_Z30__device_stub__Z6kernelPdS_S_iPdS_S_i
.globl _Z6kernelPdS_S_i
.type _Z6kernelPdS_S_i, @function
_Z6kernelPdS_S_i:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z30__device_stub__Z6kernelPdS_S_iPdS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z6kernelPdS_S_i, .-_Z6kernelPdS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC2:
.string "%s (%d)\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC5:
.string "SUCCESS (Relative error : %.3e)\n"
.align 8
.LC6:
.string "ERROR (Relative error : %.3e)\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $72, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl $8000, %edi
call malloc@PLT
movq %rax, %rbp
movl $8000, %edi
call malloc@PLT
movq %rax, %rbx
movl $8000, %edi
call malloc@PLT
movq %rax, %r12
movl $0, %eax
movsd .LC1(%rip), %xmm1
.L12:
pxor %xmm0, %xmm0
cvtsi2sdl %eax, %xmm0
movapd %xmm0, %xmm2
addsd %xmm1, %xmm2
movapd %xmm1, %xmm3
divsd %xmm2, %xmm3
movsd %xmm3, 0(%rbp,%rax,8)
subsd %xmm1, %xmm0
divsd %xmm2, %xmm0
movsd %xmm0, (%rbx,%rax,8)
addq $1, %rax
cmpq $1000, %rax
jne .L12
leaq 8(%rsp), %rdi
movl $8000, %esi
call cudaMalloc@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
movl $42, %ecx
leaq .LC2(%rip), %r13
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 16(%rsp), %rdi
movl $8000, %esi
call cudaMalloc@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
movl $45, %ecx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 24(%rsp), %rdi
movl $8000, %esi
call cudaMalloc@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
movl $46, %ecx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %ecx
movl $8000, %edx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
movl $48, %ecx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %ecx
movl $8000, %edx
movq %rbx, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
movl $49, %ecx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $64, 32(%rsp)
movl $1, 36(%rsp)
movl $16, 44(%rsp)
movl $1, 48(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 32(%rsp), %rdx
movl $1, %ecx
movq 44(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L24
.L13:
movl $2, %ecx
movl $8000, %edx
movq 24(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
movl $64, %ecx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movl $0, %eax
pxor %xmm4, %xmm4
movapd %xmm4, %xmm0
movq .LC3(%rip), %xmm5
.L14:
movsd (%r12,%rax), %xmm1
movsd 0(%rbp,%rax), %xmm3
addsd (%rbx,%rax), %xmm3
movapd %xmm1, %xmm2
subsd %xmm3, %xmm2
andpd %xmm5, %xmm2
addsd %xmm2, %xmm0
andpd %xmm5, %xmm1
addsd %xmm1, %xmm4
addq $8, %rax
cmpq $8000, %rax
jne .L14
divsd %xmm4, %xmm0
movsd .LC4(%rip), %xmm1
comisd %xmm0, %xmm1
jbe .L22
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
.L17:
movq %rbp, %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L25
movl $0, %eax
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L24:
.cfi_restore_state
movl $1000, %ecx
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z30__device_stub__Z6kernelPdS_S_iPdS_S_i
jmp .L13
.L22:
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
jmp .L17
.L25:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC7:
.string "_Z6kernelPdS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq _Z6kernelPdS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC1:
.long 0
.long 1072693248
.section .rodata.cst16,"aM",@progbits,16
.align 16
.LC3:
.long -1
.long 2147483647
.long 0
.long 0
.section .rodata.cst8
.align 8
.LC4:
.long -1747416644
.long 1016910514
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "err1.hip"
.globl _Z21__device_stub__kernelPdS_S_i # -- Begin function _Z21__device_stub__kernelPdS_S_i
.p2align 4, 0x90
.type _Z21__device_stub__kernelPdS_S_i,@function
_Z21__device_stub__kernelPdS_S_i: # @_Z21__device_stub__kernelPdS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6kernelPdS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z21__device_stub__kernelPdS_S_i, .Lfunc_end0-_Z21__device_stub__kernelPdS_S_i
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI1_0:
.quad 0x3ff0000000000000 # double 1
.LCPI1_1:
.quad 0xbff0000000000000 # double -1
.LCPI1_3:
.quad 0x3c9cd2b297d889bc # double 9.9999999999999997E-17
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0
.LCPI1_2:
.quad 0x7fffffffffffffff # double NaN
.quad 0x7fffffffffffffff # double NaN
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $152, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $8000, %edi # imm = 0x1F40
callq malloc
movq %rax, %rbx
movl $8000, %edi # imm = 0x1F40
callq malloc
movq %rax, %r14
movl $8000, %edi # imm = 0x1F40
callq malloc
movq %rax, %r15
xorl %eax, %eax
movsd .LCPI1_0(%rip), %xmm0 # xmm0 = mem[0],zero
movsd .LCPI1_1(%rip), %xmm1 # xmm1 = mem[0],zero
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
xorps %xmm2, %xmm2
cvtsi2sd %eax, %xmm2
movapd %xmm2, %xmm3
addsd %xmm0, %xmm3
movapd %xmm0, %xmm4
divsd %xmm3, %xmm4
movsd %xmm4, (%rbx,%rax,8)
addsd %xmm1, %xmm2
divsd %xmm3, %xmm2
movsd %xmm2, (%r14,%rax,8)
incq %rax
cmpq $1000, %rax # imm = 0x3E8
jne .LBB1_1
# %bb.2:
leaq 24(%rsp), %rdi
movl $8000, %esi # imm = 0x1F40
callq hipMalloc
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
movl $44, %edx
xorl %eax, %eax
callq printf
leaq 16(%rsp), %rdi
movl $8000, %esi # imm = 0x1F40
callq hipMalloc
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
movl $47, %edx
xorl %eax, %eax
callq printf
leaq 8(%rsp), %rdi
movl $8000, %esi # imm = 0x1F40
callq hipMalloc
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
movl $48, %edx
xorl %eax, %eax
callq printf
movq 24(%rsp), %rdi
movl $8000, %edx # imm = 0x1F40
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
movl $50, %edx
xorl %eax, %eax
callq printf
movq 16(%rsp), %rdi
movl $8000, %edx # imm = 0x1F40
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
movl $51, %edx
xorl %eax, %eax
callq printf
movabsq $4294967312, %rdi # imm = 0x100000010
leaq 48(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 24(%rsp), %rax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl $1000, 36(%rsp) # imm = 0x3E8
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 36(%rsp), %rax
movq %rax, 136(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z6kernelPdS_S_i, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
movq 8(%rsp), %rsi
movl $8000, %edx # imm = 0x1F40
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
movl %eax, %edi
callq hipGetErrorString
xorl %r12d, %r12d
movl $.L.str, %edi
movq %rax, %rsi
movl $66, %edx
xorl %eax, %eax
callq printf
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
xorpd %xmm1, %xmm1
movapd .LCPI1_2(%rip), %xmm2 # xmm2 = [NaN,NaN]
xorpd %xmm0, %xmm0
.p2align 4, 0x90
.LBB1_5: # =>This Inner Loop Header: Depth=1
movsd (%r15,%r12,8), %xmm3 # xmm3 = mem[0],zero
movsd (%rbx,%r12,8), %xmm4 # xmm4 = mem[0],zero
addsd (%r14,%r12,8), %xmm4
movapd %xmm3, %xmm5
subsd %xmm4, %xmm5
andpd %xmm2, %xmm5
addsd %xmm5, %xmm0
andpd %xmm2, %xmm3
addsd %xmm3, %xmm1
incq %r12
cmpq $1000, %r12 # imm = 0x3E8
jne .LBB1_5
# %bb.6:
divsd %xmm1, %xmm0
movsd .LCPI1_3(%rip), %xmm1 # xmm1 = mem[0],zero
ucomisd %xmm0, %xmm1
movl $.L.str.1, %eax
movl $.L.str.2, %edi
cmovaq %rax, %rdi
movb $1, %al
callq printf
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
xorl %eax, %eax
addq $152, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6kernelPdS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6kernelPdS_S_i,@object # @_Z6kernelPdS_S_i
.section .rodata,"a",@progbits
.globl _Z6kernelPdS_S_i
.p2align 3, 0x0
_Z6kernelPdS_S_i:
.quad _Z21__device_stub__kernelPdS_S_i
.size _Z6kernelPdS_S_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%s (%d)\n"
.size .L.str, 9
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "SUCCESS (Relative error : %.3e)\n"
.size .L.str.1, 33
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "ERROR (Relative error : %.3e)\n"
.size .L.str.2, 31
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z6kernelPdS_S_i"
.size .L__unnamed_1, 17
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__kernelPdS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6kernelPdS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <iostream>
// TEST to see if i can resize vector in a function
int new_size = 10;
int dev_init = 33;
void resize_vec(thrust::host_vector<int> &vec){
vec.resize(new_size);
// for( int i = 0; i< new_size; i++)
// vec[i] = i;
thrust::device_vector<int> d_vec(new_size, dev_init);
int* d_vec_ptr = thrust::raw_pointer_cast(&d_vec[0]);
int* vec_ptr = thrust::raw_pointer_cast(&vec[0]);
cudaMemcpy(vec_ptr, d_vec_ptr, (size_t)(new_size*sizeof(int)), cudaMemcpyDeviceToHost);
}
int main(){
thrust::host_vector<int> test_vec(0);
std::cout << "Old size = " << test_vec.size() << std::endl;
resize_vec(test_vec);
std::cout << "new size = " << test_vec.size() << std::endl;
for( int i = 0; i< new_size; i++)
std::cout << test_vec[i] << std::endl;
return 0;
} | code for sm_80
Function : _ZN3cub17CUB_200700_800_NS6detail8for_each13static_kernelINS2_12policy_hub_t12policy_350_tEmN6thrust20THRUST_200700_800_NS8cuda_cub20__uninitialized_fill7functorINS7_10device_ptrIiEEiEEEEvT0_T1_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e220000002500 */
/*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0030*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e620000002100 */
/*0040*/ IMAD.WIDE.U32 R2, R2, 0x200, RZ ; /* 0x0000020002027825 */
/* 0x001fca00078e00ff */
/*0050*/ IADD3 R4, P1, -R2.reuse, c[0x0][0x160], RZ ; /* 0x0000580002047a10 */
/* 0x040fe40007f3e1ff */
/*0060*/ IADD3 R0, P2, R2, R5, RZ ; /* 0x0000000502007210 */
/* 0x002fe40007f5e0ff */
/*0070*/ ISETP.GT.U32.AND P0, PT, R4, 0x1ff, PT ; /* 0x000001ff0400780c */
/* 0x000fe40003f04070 */
/*0080*/ IADD3.X R6, ~R3, c[0x0][0x164], RZ, P1, !PT ; /* 0x0000590003067a10 */
/* 0x000fe20000ffe5ff */
/*0090*/ IMAD.X R3, RZ, RZ, R3, P2 ; /* 0x000000ffff037224 */
/* 0x000fe200010e0603 */
/*00a0*/ LEA R2, P1, R0, c[0x0][0x168], 0x2 ; /* 0x00005a0000027a11 */
/* 0x000fe400078210ff */
/*00b0*/ ISETP.GT.U32.AND.EX P0, PT, R6, RZ, PT, P0 ; /* 0x000000ff0600720c */
/* 0x000fc40003f04100 */
/*00c0*/ LEA.HI.X R3, R0, c[0x0][0x16c], R3, 0x2, P1 ; /* 0x00005b0000037a11 */
/* 0x000fd600008f1403 */
/*00d0*/ @P0 BRA 0x1a0 ; /* 0x000000c000000947 */
/* 0x000fea0003800000 */
/*00e0*/ ISETP.GT.U32.AND P0, PT, R4, R5, PT ; /* 0x000000050400720c */
/* 0x000fe40003f04070 */
/*00f0*/ SHF.R.S32.HI R6, RZ, 0x1f, R4 ; /* 0x0000001fff067819 */
/* 0x000fe40000011404 */
/*0100*/ IADD3 R0, R5, 0x100, RZ ; /* 0x0000010005007810 */
/* 0x000fe40007ffe0ff */
/*0110*/ ISETP.GT.U32.AND.EX P0, PT, R6, RZ, PT, P0 ; /* 0x000000ff0600720c */
/* 0x000fda0003f04100 */
/*0120*/ @P0 IMAD.MOV.U32 R7, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff070624 */
/* 0x000fca00078e00ff */
/*0130*/ @P0 STG.E [R2.64], R7 ; /* 0x0000000702000986 */
/* 0x0001e2000c101904 */
/*0140*/ ISETP.GT.U32.AND P0, PT, R4, R0, PT ; /* 0x000000000400720c */
/* 0x000fc80003f04070 */
/*0150*/ ISETP.GT.U32.AND.EX P0, PT, R6, RZ, PT, P0 ; /* 0x000000ff0600720c */
/* 0x000fda0003f04100 */
/*0160*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0170*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff057624 */
/* 0x001fca00078e00ff */
/*0180*/ STG.E [R2.64+0x400], R5 ; /* 0x0004000502007986 */
/* 0x000fe2000c101904 */
/*0190*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01a0*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff057624 */
/* 0x000fca00078e00ff */
/*01b0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe8000c101904 */
/*01c0*/ STG.E [R2.64+0x400], R5 ; /* 0x0004000502007986 */
/* 0x000fe2000c101904 */
/*01d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01e0*/ BRA 0x1e0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _ZN3cub17CUB_200700_800_NS11EmptyKernelIvEEvv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <iostream>
// TEST to see if i can resize vector in a function
int new_size = 10;
int dev_init = 33;
void resize_vec(thrust::host_vector<int> &vec){
vec.resize(new_size);
// for( int i = 0; i< new_size; i++)
// vec[i] = i;
thrust::device_vector<int> d_vec(new_size, dev_init);
int* d_vec_ptr = thrust::raw_pointer_cast(&d_vec[0]);
int* vec_ptr = thrust::raw_pointer_cast(&vec[0]);
cudaMemcpy(vec_ptr, d_vec_ptr, (size_t)(new_size*sizeof(int)), cudaMemcpyDeviceToHost);
}
int main(){
thrust::host_vector<int> test_vec(0);
std::cout << "Old size = " << test_vec.size() << std::endl;
resize_vec(test_vec);
std::cout << "new size = " << test_vec.size() << std::endl;
for( int i = 0; i< new_size; i++)
std::cout << test_vec[i] << std::endl;
return 0;
} | #include <hip/hip_runtime.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <iostream>
// TEST to see if i can resize vector in a function
int new_size = 10;
int dev_init = 33;
void resize_vec(thrust::host_vector<int> &vec){
vec.resize(new_size);
// for( int i = 0; i< new_size; i++)
// vec[i] = i;
thrust::device_vector<int> d_vec(new_size, dev_init);
int* d_vec_ptr = thrust::raw_pointer_cast(&d_vec[0]);
int* vec_ptr = thrust::raw_pointer_cast(&vec[0]);
hipMemcpy(vec_ptr, d_vec_ptr, (size_t)(new_size*sizeof(int)), hipMemcpyDeviceToHost);
}
int main(){
thrust::host_vector<int> test_vec(0);
std::cout << "Old size = " << test_vec.size() << std::endl;
resize_vec(test_vec);
std::cout << "new size = " << test_vec.size() << std::endl;
for( int i = 0; i< new_size; i++)
std::cout << test_vec[i] << std::endl;
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <iostream>
// TEST to see if i can resize vector in a function
int new_size = 10;
int dev_init = 33;
void resize_vec(thrust::host_vector<int> &vec){
vec.resize(new_size);
// for( int i = 0; i< new_size; i++)
// vec[i] = i;
thrust::device_vector<int> d_vec(new_size, dev_init);
int* d_vec_ptr = thrust::raw_pointer_cast(&d_vec[0]);
int* vec_ptr = thrust::raw_pointer_cast(&vec[0]);
hipMemcpy(vec_ptr, d_vec_ptr, (size_t)(new_size*sizeof(int)), hipMemcpyDeviceToHost);
}
int main(){
thrust::host_vector<int> test_vec(0);
std::cout << "Old size = " << test_vec.size() << std::endl;
resize_vec(test_vec);
std::cout << "new size = " << test_vec.size() << std::endl;
for( int i = 0; i< new_size; i++)
std::cout << test_vec[i] << std::endl;
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.section .text._ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIiEEiEEmLj1EEEvT0_T1_S9_,"axG",@progbits,_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIiEEiEEmLj1EEEvT0_T1_S9_,comdat
.protected _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIiEEiEEmLj1EEEvT0_T1_S9_
.globl _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIiEEiEEmLj1EEEvT0_T1_S9_
.p2align 8
.type _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIiEEiEEmLj1EEEvT0_T1_S9_,@function
_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIiEEiEEmLj1EEEvT0_T1_S9_:
s_load_b128 s[4:7], s[0:1], 0x10
s_lshl_b32 s2, s15, 8
s_waitcnt lgkmcnt(0)
s_add_u32 s2, s2, s6
s_addc_u32 s3, 0, s7
s_sub_u32 s4, s4, s2
s_subb_u32 s5, s5, s3
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_gt_u64_e64 s5, 0x100, s[4:5]
s_and_b32 s5, s5, exec_lo
s_cselect_b32 s4, s4, 0x100
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
v_cmp_gt_u32_e32 vcc_lo, s4, v0
s_cmpk_eq_i32 s4, 0x100
s_cselect_b32 s4, -1, 0
s_or_b32 s4, s4, vcc_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s5, s4
s_cbranch_execz .LBB0_2
s_clause 0x1
s_load_b64 s[4:5], s[0:1], 0x0
s_load_b32 s6, s[0:1], 0x8
v_lshlrev_b32_e32 v0, 2, v0
s_lshl_b64 s[0:1], s[2:3], 2
s_waitcnt lgkmcnt(0)
s_add_u32 s0, s4, s0
s_addc_u32 s1, s5, s1
v_add_co_u32 v0, s0, s0, v0
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v1, null, s1, 0, s0
v_mov_b32_e32 v2, s6
flat_store_b32 v[0:1], v2
.LBB0_2:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIiEEiEEmLj1EEEvT0_T1_S9_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 32
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.section .text._ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIiEEiEEmLj1EEEvT0_T1_S9_,"axG",@progbits,_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIiEEiEEmLj1EEEvT0_T1_S9_,comdat
.Lfunc_end0:
.size _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIiEEiEEmLj1EEEvT0_T1_S9_, .Lfunc_end0-_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIiEEiEEmLj1EEEvT0_T1_S9_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 16
.value_kind: by_value
- .offset: 16
.size: 8
.value_kind: by_value
- .offset: 24
.size: 8
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 32
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 256
.name: _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIiEEiEEmLj1EEEvT0_T1_S9_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIiEEiEEmLj1EEEvT0_T1_S9_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _ZN3cub17CUB_200700_800_NS6detail8for_each13static_kernelINS2_12policy_hub_t12policy_350_tEmN6thrust20THRUST_200700_800_NS8cuda_cub20__uninitialized_fill7functorINS7_10device_ptrIiEEiEEEEvT0_T1_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e220000002500 */
/*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0030*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e620000002100 */
/*0040*/ IMAD.WIDE.U32 R2, R2, 0x200, RZ ; /* 0x0000020002027825 */
/* 0x001fca00078e00ff */
/*0050*/ IADD3 R4, P1, -R2.reuse, c[0x0][0x160], RZ ; /* 0x0000580002047a10 */
/* 0x040fe40007f3e1ff */
/*0060*/ IADD3 R0, P2, R2, R5, RZ ; /* 0x0000000502007210 */
/* 0x002fe40007f5e0ff */
/*0070*/ ISETP.GT.U32.AND P0, PT, R4, 0x1ff, PT ; /* 0x000001ff0400780c */
/* 0x000fe40003f04070 */
/*0080*/ IADD3.X R6, ~R3, c[0x0][0x164], RZ, P1, !PT ; /* 0x0000590003067a10 */
/* 0x000fe20000ffe5ff */
/*0090*/ IMAD.X R3, RZ, RZ, R3, P2 ; /* 0x000000ffff037224 */
/* 0x000fe200010e0603 */
/*00a0*/ LEA R2, P1, R0, c[0x0][0x168], 0x2 ; /* 0x00005a0000027a11 */
/* 0x000fe400078210ff */
/*00b0*/ ISETP.GT.U32.AND.EX P0, PT, R6, RZ, PT, P0 ; /* 0x000000ff0600720c */
/* 0x000fc40003f04100 */
/*00c0*/ LEA.HI.X R3, R0, c[0x0][0x16c], R3, 0x2, P1 ; /* 0x00005b0000037a11 */
/* 0x000fd600008f1403 */
/*00d0*/ @P0 BRA 0x1a0 ; /* 0x000000c000000947 */
/* 0x000fea0003800000 */
/*00e0*/ ISETP.GT.U32.AND P0, PT, R4, R5, PT ; /* 0x000000050400720c */
/* 0x000fe40003f04070 */
/*00f0*/ SHF.R.S32.HI R6, RZ, 0x1f, R4 ; /* 0x0000001fff067819 */
/* 0x000fe40000011404 */
/*0100*/ IADD3 R0, R5, 0x100, RZ ; /* 0x0000010005007810 */
/* 0x000fe40007ffe0ff */
/*0110*/ ISETP.GT.U32.AND.EX P0, PT, R6, RZ, PT, P0 ; /* 0x000000ff0600720c */
/* 0x000fda0003f04100 */
/*0120*/ @P0 IMAD.MOV.U32 R7, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff070624 */
/* 0x000fca00078e00ff */
/*0130*/ @P0 STG.E [R2.64], R7 ; /* 0x0000000702000986 */
/* 0x0001e2000c101904 */
/*0140*/ ISETP.GT.U32.AND P0, PT, R4, R0, PT ; /* 0x000000000400720c */
/* 0x000fc80003f04070 */
/*0150*/ ISETP.GT.U32.AND.EX P0, PT, R6, RZ, PT, P0 ; /* 0x000000ff0600720c */
/* 0x000fda0003f04100 */
/*0160*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0170*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff057624 */
/* 0x001fca00078e00ff */
/*0180*/ STG.E [R2.64+0x400], R5 ; /* 0x0004000502007986 */
/* 0x000fe2000c101904 */
/*0190*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01a0*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff057624 */
/* 0x000fca00078e00ff */
/*01b0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe8000c101904 */
/*01c0*/ STG.E [R2.64+0x400], R5 ; /* 0x0004000502007986 */
/* 0x000fe2000c101904 */
/*01d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01e0*/ BRA 0x1e0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _ZN3cub17CUB_200700_800_NS11EmptyKernelIvEEvv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.section .text._ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIiEEiEEmLj1EEEvT0_T1_S9_,"axG",@progbits,_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIiEEiEEmLj1EEEvT0_T1_S9_,comdat
.protected _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIiEEiEEmLj1EEEvT0_T1_S9_
.globl _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIiEEiEEmLj1EEEvT0_T1_S9_
.p2align 8
.type _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIiEEiEEmLj1EEEvT0_T1_S9_,@function
_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIiEEiEEmLj1EEEvT0_T1_S9_:
s_load_b128 s[4:7], s[0:1], 0x10
s_lshl_b32 s2, s15, 8
s_waitcnt lgkmcnt(0)
s_add_u32 s2, s2, s6
s_addc_u32 s3, 0, s7
s_sub_u32 s4, s4, s2
s_subb_u32 s5, s5, s3
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_gt_u64_e64 s5, 0x100, s[4:5]
s_and_b32 s5, s5, exec_lo
s_cselect_b32 s4, s4, 0x100
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
v_cmp_gt_u32_e32 vcc_lo, s4, v0
s_cmpk_eq_i32 s4, 0x100
s_cselect_b32 s4, -1, 0
s_or_b32 s4, s4, vcc_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s5, s4
s_cbranch_execz .LBB0_2
s_clause 0x1
s_load_b64 s[4:5], s[0:1], 0x0
s_load_b32 s6, s[0:1], 0x8
v_lshlrev_b32_e32 v0, 2, v0
s_lshl_b64 s[0:1], s[2:3], 2
s_waitcnt lgkmcnt(0)
s_add_u32 s0, s4, s0
s_addc_u32 s1, s5, s1
v_add_co_u32 v0, s0, s0, v0
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v1, null, s1, 0, s0
v_mov_b32_e32 v2, s6
flat_store_b32 v[0:1], v2
.LBB0_2:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIiEEiEEmLj1EEEvT0_T1_S9_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 32
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.section .text._ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIiEEiEEmLj1EEEvT0_T1_S9_,"axG",@progbits,_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIiEEiEEmLj1EEEvT0_T1_S9_,comdat
.Lfunc_end0:
.size _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIiEEiEEmLj1EEEvT0_T1_S9_, .Lfunc_end0-_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIiEEiEEmLj1EEEvT0_T1_S9_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 16
.value_kind: by_value
- .offset: 16
.size: 8
.value_kind: by_value
- .offset: 24
.size: 8
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 32
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 256
.name: _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIiEEiEEmLj1EEEvT0_T1_S9_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIiEEiEEmLj1EEEvT0_T1_S9_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define checkCudaErrors(val)\
fprintf(stderr, "CUDA error at %s:%d (%s) \n", __FILE__, __LINE__, cudaGetErrorString(val));
//Par rapport a la question 7 N = 1000 et nb thread = 640
// =>si on fait 2 x nb_thread alors 1280 threads > N peut causer bufferoverflow/seg fault
__global__ void kernel(double *a, double *b, double *c, int N)
{
//int i = blockIdx.x * blockDim.x + threadIdx.x;
//Q 8 :
/*
int i = 2*(blockIdx.x * blockDim.x + threadIdx.x);
if(i<N-1){
c[i] = a[i] + b[i];
c[i+1] = a[i+1] + b[i+1];
}*/
//Q 8 second way :
int i = blockIdx.x * blockDim.x + threadIdx.x;
int totalthreads = (blockDim.x * gridDim.x);
c[i] = a[i] + b[i];
if(i<(N-totalthreads)){
c[i+totalthreads] = a[i+totalthreads] + b[i+totalthreads];
}
}
int main(int argc, char **argv)
{
int N = 1000;
int sz_in_bytes = N*sizeof(double);
double *h_a, *h_b, *h_c;
double *d_a, *d_b, *d_c;
h_a = (double*)malloc(sz_in_bytes);
h_b = (double*)malloc(sz_in_bytes);
h_c = (double*)malloc(sz_in_bytes);
// Initiate values on h_a and h_b
for(int i = 0 ; i < N ; i++)
{
h_a[i] = 1./(1.+i);
h_b[i] = (i-1.)/(i+1.);
}
checkCudaErrors(cudaMalloc((void**)&d_a, sz_in_bytes));
checkCudaErrors(cudaMalloc((void**)&d_b, sz_in_bytes));
checkCudaErrors(cudaMalloc((void**)&d_c, sz_in_bytes));
checkCudaErrors(cudaMemcpy(d_a, h_a, sz_in_bytes, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_b, h_b, sz_in_bytes, cudaMemcpyHostToDevice));
dim3 dimBlock(64, 1, 1);
dim3 dimGrid(10, 1, 1);
kernel<<<dimGrid , dimBlock>>>(d_a, d_b, d_c, N);
checkCudaErrors(cudaMemcpy(h_c, d_c, sz_in_bytes, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_a));
checkCudaErrors(cudaFree(d_b));
checkCudaErrors(cudaFree(d_c));
// Verifying
double err = 0, norm = 0;
for(int i = 0 ; i < N ; i++)
{
double err_loc = fabs(h_c[i] - (h_a[i]+h_b[i]));
err += err_loc;
norm += fabs(h_c[i]);
}
if (err/norm < 1.e-16)
{
printf("SUCCESS (Relative error : %.3e)\n", err/norm);
}
else
{
printf("ERROR (Relative error : %.3e)\n", err/norm);
}
free(h_a);
free(h_b);
free(h_c);
return 0;
} | code for sm_80
Function : _Z6kernelPdS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R15, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff0f7435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0060*/ IMAD.WIDE R4, R0, R15, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fc800078e020f */
/*0070*/ IMAD.WIDE R2, R0, R15, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fe200078e020f */
/*0080*/ LDG.E.64 R6, [R4.64] ; /* 0x0000000404067981 */
/* 0x000ea8000c1e1b00 */
/*0090*/ LDG.E.64 R8, [R2.64] ; /* 0x0000000402087981 */
/* 0x000ea2000c1e1b00 */
/*00a0*/ MOV R11, c[0x0][0x0] ; /* 0x00000000000b7a02 */
/* 0x000fca0000000f00 */
/*00b0*/ IMAD R11, R11, c[0x0][0xc], RZ ; /* 0x000003000b0b7a24 */
/* 0x000fca00078e02ff */
/*00c0*/ IADD3 R13, -R11, c[0x0][0x178], RZ ; /* 0x00005e000b0d7a10 */
/* 0x000fc80007ffe1ff */
/*00d0*/ ISETP.GE.AND P0, PT, R0.reuse, R13, PT ; /* 0x0000000d0000720c */
/* 0x040fe20003f06270 */
/*00e0*/ DADD R6, R6, R8 ; /* 0x0000000006067229 */
/* 0x0040640000000008 */
/*00f0*/ IMAD.WIDE R8, R0, R15, c[0x0][0x170] ; /* 0x00005c0000087625 */
/* 0x001fca00078e020f */
/*0100*/ STG.E.64 [R8.64], R6 ; /* 0x0000000608007986 */
/* 0x0021ea000c101b04 */
/*0110*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0120*/ IMAD.WIDE R4, R11, 0x8, R4 ; /* 0x000000080b047825 */
/* 0x000fc800078e0204 */
/*0130*/ IMAD.WIDE R2, R11.reuse, 0x8, R2 ; /* 0x000000080b027825 */
/* 0x040fe400078e0202 */
/*0140*/ LDG.E.64 R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1b00 */
/*0150*/ LDG.E.64 R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1b00 */
/*0160*/ IMAD.WIDE R8, R11, 0x8, R8 ; /* 0x000000080b087825 */
/* 0x001fe200078e0208 */
/*0170*/ DADD R6, R4, R2 ; /* 0x0000000004067229 */
/* 0x004e0e0000000002 */
/*0180*/ STG.E.64 [R8.64], R6 ; /* 0x0000000608007986 */
/* 0x001fe2000c101b04 */
/*0190*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01a0*/ BRA 0x1a0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define checkCudaErrors(val)\
fprintf(stderr, "CUDA error at %s:%d (%s) \n", __FILE__, __LINE__, cudaGetErrorString(val));
//Par rapport a la question 7 N = 1000 et nb thread = 640
// =>si on fait 2 x nb_thread alors 1280 threads > N peut causer bufferoverflow/seg fault
__global__ void kernel(double *a, double *b, double *c, int N)
{
//int i = blockIdx.x * blockDim.x + threadIdx.x;
//Q 8 :
/*
int i = 2*(blockIdx.x * blockDim.x + threadIdx.x);
if(i<N-1){
c[i] = a[i] + b[i];
c[i+1] = a[i+1] + b[i+1];
}*/
//Q 8 second way :
int i = blockIdx.x * blockDim.x + threadIdx.x;
int totalthreads = (blockDim.x * gridDim.x);
c[i] = a[i] + b[i];
if(i<(N-totalthreads)){
c[i+totalthreads] = a[i+totalthreads] + b[i+totalthreads];
}
}
int main(int argc, char **argv)
{
int N = 1000;
int sz_in_bytes = N*sizeof(double);
double *h_a, *h_b, *h_c;
double *d_a, *d_b, *d_c;
h_a = (double*)malloc(sz_in_bytes);
h_b = (double*)malloc(sz_in_bytes);
h_c = (double*)malloc(sz_in_bytes);
// Initiate values on h_a and h_b
for(int i = 0 ; i < N ; i++)
{
h_a[i] = 1./(1.+i);
h_b[i] = (i-1.)/(i+1.);
}
checkCudaErrors(cudaMalloc((void**)&d_a, sz_in_bytes));
checkCudaErrors(cudaMalloc((void**)&d_b, sz_in_bytes));
checkCudaErrors(cudaMalloc((void**)&d_c, sz_in_bytes));
checkCudaErrors(cudaMemcpy(d_a, h_a, sz_in_bytes, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_b, h_b, sz_in_bytes, cudaMemcpyHostToDevice));
dim3 dimBlock(64, 1, 1);
dim3 dimGrid(10, 1, 1);
kernel<<<dimGrid , dimBlock>>>(d_a, d_b, d_c, N);
checkCudaErrors(cudaMemcpy(h_c, d_c, sz_in_bytes, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_a));
checkCudaErrors(cudaFree(d_b));
checkCudaErrors(cudaFree(d_c));
// Verifying
double err = 0, norm = 0;
for(int i = 0 ; i < N ; i++)
{
double err_loc = fabs(h_c[i] - (h_a[i]+h_b[i]));
err += err_loc;
norm += fabs(h_c[i]);
}
if (err/norm < 1.e-16)
{
printf("SUCCESS (Relative error : %.3e)\n", err/norm);
}
else
{
printf("ERROR (Relative error : %.3e)\n", err/norm);
}
free(h_a);
free(h_b);
free(h_c);
return 0;
} | .file "tmpxft_00036a3e_00000000-6_err1.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z30__device_stub__Z6kernelPdS_S_iPdS_S_i
.type _Z30__device_stub__Z6kernelPdS_S_iPdS_S_i, @function
_Z30__device_stub__Z6kernelPdS_S_iPdS_S_i:
.LFB2082:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z6kernelPdS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z30__device_stub__Z6kernelPdS_S_iPdS_S_i, .-_Z30__device_stub__Z6kernelPdS_S_iPdS_S_i
.globl _Z6kernelPdS_S_i
.type _Z6kernelPdS_S_i, @function
_Z6kernelPdS_S_i:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z30__device_stub__Z6kernelPdS_S_iPdS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z6kernelPdS_S_i, .-_Z6kernelPdS_S_i
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "/home/ubuntu/Datasets/stackv2/train-structured/pedromachuca/ProgGpu/master/ProgCuda/CODE/Ex2/err1.cu"
.section .rodata.str1.1,"aMS",@progbits,1
.LC3:
.string "CUDA error at %s:%d (%s) \n"
.section .rodata.str1.8
.align 8
.LC6:
.string "SUCCESS (Relative error : %.3e)\n"
.align 8
.LC7:
.string "ERROR (Relative error : %.3e)\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $64, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl $8000, %edi
call malloc@PLT
movq %rax, %rbp
movl $8000, %edi
call malloc@PLT
movq %rax, %rbx
movl $8000, %edi
call malloc@PLT
movq %rax, %r12
movl $0, %eax
movsd .LC1(%rip), %xmm1
.L12:
pxor %xmm0, %xmm0
cvtsi2sdl %eax, %xmm0
movapd %xmm0, %xmm2
addsd %xmm1, %xmm2
movapd %xmm1, %xmm3
divsd %xmm2, %xmm3
movsd %xmm3, 0(%rbp,%rax,8)
subsd %xmm1, %xmm0
divsd %xmm2, %xmm0
movsd %xmm0, (%rbx,%rax,8)
addq $1, %rax
cmpq $1000, %rax
jne .L12
leaq 8(%rsp), %rdi
movl $8000, %esi
call cudaMalloc@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl $51, %r8d
leaq .LC2(%rip), %r14
movq %r14, %rcx
leaq .LC3(%rip), %r13
movq %r13, %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
leaq 16(%rsp), %rdi
movl $8000, %esi
call cudaMalloc@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl $52, %r8d
movq %r14, %rcx
movq %r13, %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
leaq 24(%rsp), %rdi
movl $8000, %esi
call cudaMalloc@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl $53, %r8d
movq %r14, %rcx
movq %r13, %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %ecx
movl $8000, %edx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl $55, %r8d
movq %r14, %rcx
movq %r13, %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %ecx
movl $8000, %edx
movq %rbx, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl $56, %r8d
movq %r14, %rcx
movq %r13, %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $64, 32(%rsp)
movl $1, 36(%rsp)
movl $10, 44(%rsp)
movl $1, 48(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 32(%rsp), %rdx
movl $1, %ecx
movq 44(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L24
.L13:
movl $2, %ecx
movl $8000, %edx
movq 24(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl $62, %r8d
leaq .LC2(%rip), %r14
movq %r14, %rcx
leaq .LC3(%rip), %r13
movq %r13, %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl $64, %r8d
movq %r14, %rcx
movq %r13, %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl $65, %r8d
movq %r14, %rcx
movq %r13, %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl $66, %r8d
movq %r14, %rcx
movq %r13, %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $0, %eax
pxor %xmm4, %xmm4
movapd %xmm4, %xmm0
movq .LC4(%rip), %xmm5
.L14:
movsd (%r12,%rax), %xmm1
movsd 0(%rbp,%rax), %xmm3
addsd (%rbx,%rax), %xmm3
movapd %xmm1, %xmm2
subsd %xmm3, %xmm2
andpd %xmm5, %xmm2
addsd %xmm2, %xmm0
andpd %xmm5, %xmm1
addsd %xmm1, %xmm4
addq $8, %rax
cmpq $8000, %rax
jne .L14
divsd %xmm4, %xmm0
movsd .LC5(%rip), %xmm1
comisd %xmm0, %xmm1
jbe .L22
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
.L17:
movq %rbp, %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L25
movl $0, %eax
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L24:
.cfi_restore_state
movl $1000, %ecx
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z30__device_stub__Z6kernelPdS_S_iPdS_S_i
jmp .L13
.L22:
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
jmp .L17
.L25:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC8:
.string "_Z6kernelPdS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _Z6kernelPdS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC1:
.long 0
.long 1072693248
.section .rodata.cst16,"aM",@progbits,16
.align 16
.LC4:
.long -1
.long 2147483647
.long 0
.long 0
.section .rodata.cst8
.align 8
.LC5:
.long -1747416644
.long 1016910514
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define checkCudaErrors(val)\
fprintf(stderr, "CUDA error at %s:%d (%s) \n", __FILE__, __LINE__, cudaGetErrorString(val));
//Par rapport a la question 7 N = 1000 et nb thread = 640
// =>si on fait 2 x nb_thread alors 1280 threads > N peut causer bufferoverflow/seg fault
__global__ void kernel(double *a, double *b, double *c, int N)
{
//int i = blockIdx.x * blockDim.x + threadIdx.x;
//Q 8 :
/*
int i = 2*(blockIdx.x * blockDim.x + threadIdx.x);
if(i<N-1){
c[i] = a[i] + b[i];
c[i+1] = a[i+1] + b[i+1];
}*/
//Q 8 second way :
int i = blockIdx.x * blockDim.x + threadIdx.x;
int totalthreads = (blockDim.x * gridDim.x);
c[i] = a[i] + b[i];
if(i<(N-totalthreads)){
c[i+totalthreads] = a[i+totalthreads] + b[i+totalthreads];
}
}
int main(int argc, char **argv)
{
int N = 1000;
int sz_in_bytes = N*sizeof(double);
double *h_a, *h_b, *h_c;
double *d_a, *d_b, *d_c;
h_a = (double*)malloc(sz_in_bytes);
h_b = (double*)malloc(sz_in_bytes);
h_c = (double*)malloc(sz_in_bytes);
// Initiate values on h_a and h_b
for(int i = 0 ; i < N ; i++)
{
h_a[i] = 1./(1.+i);
h_b[i] = (i-1.)/(i+1.);
}
checkCudaErrors(cudaMalloc((void**)&d_a, sz_in_bytes));
checkCudaErrors(cudaMalloc((void**)&d_b, sz_in_bytes));
checkCudaErrors(cudaMalloc((void**)&d_c, sz_in_bytes));
checkCudaErrors(cudaMemcpy(d_a, h_a, sz_in_bytes, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_b, h_b, sz_in_bytes, cudaMemcpyHostToDevice));
dim3 dimBlock(64, 1, 1);
dim3 dimGrid(10, 1, 1);
kernel<<<dimGrid , dimBlock>>>(d_a, d_b, d_c, N);
checkCudaErrors(cudaMemcpy(h_c, d_c, sz_in_bytes, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_a));
checkCudaErrors(cudaFree(d_b));
checkCudaErrors(cudaFree(d_c));
// Verifying
double err = 0, norm = 0;
for(int i = 0 ; i < N ; i++)
{
double err_loc = fabs(h_c[i] - (h_a[i]+h_b[i]));
err += err_loc;
norm += fabs(h_c[i]);
}
if (err/norm < 1.e-16)
{
printf("SUCCESS (Relative error : %.3e)\n", err/norm);
}
else
{
printf("ERROR (Relative error : %.3e)\n", err/norm);
}
free(h_a);
free(h_b);
free(h_c);
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define checkCudaErrors(val)\
fprintf(stderr, "CUDA error at %s:%d (%s) \n", __FILE__, __LINE__, hipGetErrorString(val));
//Par rapport a la question 7 N = 1000 et nb thread = 640
// =>si on fait 2 x nb_thread alors 1280 threads > N peut causer bufferoverflow/seg fault
__global__ void kernel(double *a, double *b, double *c, int N)
{
//int i = blockIdx.x * blockDim.x + threadIdx.x;
//Q 8 :
/*
int i = 2*(blockIdx.x * blockDim.x + threadIdx.x);
if(i<N-1){
c[i] = a[i] + b[i];
c[i+1] = a[i+1] + b[i+1];
}*/
//Q 8 second way :
int i = blockIdx.x * blockDim.x + threadIdx.x;
int totalthreads = (blockDim.x * gridDim.x);
c[i] = a[i] + b[i];
if(i<(N-totalthreads)){
c[i+totalthreads] = a[i+totalthreads] + b[i+totalthreads];
}
}
int main(int argc, char **argv)
{
int N = 1000;
int sz_in_bytes = N*sizeof(double);
double *h_a, *h_b, *h_c;
double *d_a, *d_b, *d_c;
h_a = (double*)malloc(sz_in_bytes);
h_b = (double*)malloc(sz_in_bytes);
h_c = (double*)malloc(sz_in_bytes);
// Initiate values on h_a and h_b
for(int i = 0 ; i < N ; i++)
{
h_a[i] = 1./(1.+i);
h_b[i] = (i-1.)/(i+1.);
}
checkCudaErrors(hipMalloc((void**)&d_a, sz_in_bytes));
checkCudaErrors(hipMalloc((void**)&d_b, sz_in_bytes));
checkCudaErrors(hipMalloc((void**)&d_c, sz_in_bytes));
checkCudaErrors(hipMemcpy(d_a, h_a, sz_in_bytes, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_b, h_b, sz_in_bytes, hipMemcpyHostToDevice));
dim3 dimBlock(64, 1, 1);
dim3 dimGrid(10, 1, 1);
kernel<<<dimGrid , dimBlock>>>(d_a, d_b, d_c, N);
checkCudaErrors(hipMemcpy(h_c, d_c, sz_in_bytes, hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(d_a));
checkCudaErrors(hipFree(d_b));
checkCudaErrors(hipFree(d_c));
// Verifying
double err = 0, norm = 0;
for(int i = 0 ; i < N ; i++)
{
double err_loc = fabs(h_c[i] - (h_a[i]+h_b[i]));
err += err_loc;
norm += fabs(h_c[i]);
}
if (err/norm < 1.e-16)
{
printf("SUCCESS (Relative error : %.3e)\n", err/norm);
}
else
{
printf("ERROR (Relative error : %.3e)\n", err/norm);
}
free(h_a);
free(h_b);
free(h_c);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define checkCudaErrors(val)\
fprintf(stderr, "CUDA error at %s:%d (%s) \n", __FILE__, __LINE__, hipGetErrorString(val));
//Par rapport a la question 7 N = 1000 et nb thread = 640
// =>si on fait 2 x nb_thread alors 1280 threads > N peut causer bufferoverflow/seg fault
__global__ void kernel(double *a, double *b, double *c, int N)
{
//int i = blockIdx.x * blockDim.x + threadIdx.x;
//Q 8 :
/*
int i = 2*(blockIdx.x * blockDim.x + threadIdx.x);
if(i<N-1){
c[i] = a[i] + b[i];
c[i+1] = a[i+1] + b[i+1];
}*/
//Q 8 second way :
int i = blockIdx.x * blockDim.x + threadIdx.x;
int totalthreads = (blockDim.x * gridDim.x);
c[i] = a[i] + b[i];
if(i<(N-totalthreads)){
c[i+totalthreads] = a[i+totalthreads] + b[i+totalthreads];
}
}
int main(int argc, char **argv)
{
int N = 1000;
int sz_in_bytes = N*sizeof(double);
double *h_a, *h_b, *h_c;
double *d_a, *d_b, *d_c;
h_a = (double*)malloc(sz_in_bytes);
h_b = (double*)malloc(sz_in_bytes);
h_c = (double*)malloc(sz_in_bytes);
// Initiate values on h_a and h_b
for(int i = 0 ; i < N ; i++)
{
h_a[i] = 1./(1.+i);
h_b[i] = (i-1.)/(i+1.);
}
checkCudaErrors(hipMalloc((void**)&d_a, sz_in_bytes));
checkCudaErrors(hipMalloc((void**)&d_b, sz_in_bytes));
checkCudaErrors(hipMalloc((void**)&d_c, sz_in_bytes));
checkCudaErrors(hipMemcpy(d_a, h_a, sz_in_bytes, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_b, h_b, sz_in_bytes, hipMemcpyHostToDevice));
dim3 dimBlock(64, 1, 1);
dim3 dimGrid(10, 1, 1);
kernel<<<dimGrid , dimBlock>>>(d_a, d_b, d_c, N);
checkCudaErrors(hipMemcpy(h_c, d_c, sz_in_bytes, hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(d_a));
checkCudaErrors(hipFree(d_b));
checkCudaErrors(hipFree(d_c));
// Verifying
double err = 0, norm = 0;
for(int i = 0 ; i < N ; i++)
{
double err_loc = fabs(h_c[i] - (h_a[i]+h_b[i]));
err += err_loc;
norm += fabs(h_c[i]);
}
if (err/norm < 1.e-16)
{
printf("SUCCESS (Relative error : %.3e)\n", err/norm);
}
else
{
printf("ERROR (Relative error : %.3e)\n", err/norm);
}
free(h_a);
free(h_b);
free(h_c);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6kernelPdS_S_i
.globl _Z6kernelPdS_S_i
.p2align 8
.type _Z6kernelPdS_S_i,@function
_Z6kernelPdS_S_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b128 s[4:7], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s8, s2, 0xffff
s_load_b64 s[2:3], s[0:1], 0x10
v_mad_u64_u32 v[1:2], null, s15, s8, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v2, 31, v1
v_lshlrev_b64 v[2:3], 3, v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v4, vcc_lo, s4, v2
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v3, vcc_lo
v_add_co_u32 v6, vcc_lo, s6, v2
v_add_co_ci_u32_e32 v7, vcc_lo, s7, v3, vcc_lo
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s2, v2
global_load_b64 v[4:5], v[4:5], off
global_load_b64 v[6:7], v[6:7], off
s_clause 0x1
s_load_b32 s9, s[0:1], 0x20
s_load_b32 s1, s[0:1], 0x18
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo
s_waitcnt lgkmcnt(0)
s_mul_i32 s0, s9, s8
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_sub_i32 s1, s1, s0
v_cmp_gt_i32_e32 vcc_lo, s1, v1
s_waitcnt vmcnt(0)
v_add_f64 v[4:5], v[4:5], v[6:7]
global_store_b64 v[2:3], v[4:5], off
s_and_saveexec_b32 s1, vcc_lo
s_cbranch_execz .LBB0_2
v_add_nc_u32_e32 v0, s0, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v1, 31, v0
v_lshlrev_b64 v[0:1], 3, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s4, v0
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s2, v0
global_load_b64 v[2:3], v[2:3], off
global_load_b64 v[4:5], v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_f64 v[2:3], v[2:3], v[4:5]
global_store_b64 v[0:1], v[2:3], off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6kernelPdS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6kernelPdS_S_i, .Lfunc_end0-_Z6kernelPdS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6kernelPdS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z6kernelPdS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define checkCudaErrors(val)\
fprintf(stderr, "CUDA error at %s:%d (%s) \n", __FILE__, __LINE__, hipGetErrorString(val));
//Par rapport a la question 7 N = 1000 et nb thread = 640
// =>si on fait 2 x nb_thread alors 1280 threads > N peut causer bufferoverflow/seg fault
__global__ void kernel(double *a, double *b, double *c, int N)
{
//int i = blockIdx.x * blockDim.x + threadIdx.x;
//Q 8 :
/*
int i = 2*(blockIdx.x * blockDim.x + threadIdx.x);
if(i<N-1){
c[i] = a[i] + b[i];
c[i+1] = a[i+1] + b[i+1];
}*/
//Q 8 second way :
int i = blockIdx.x * blockDim.x + threadIdx.x;
int totalthreads = (blockDim.x * gridDim.x);
c[i] = a[i] + b[i];
if(i<(N-totalthreads)){
c[i+totalthreads] = a[i+totalthreads] + b[i+totalthreads];
}
}
int main(int argc, char **argv)
{
int N = 1000;
int sz_in_bytes = N*sizeof(double);
double *h_a, *h_b, *h_c;
double *d_a, *d_b, *d_c;
h_a = (double*)malloc(sz_in_bytes);
h_b = (double*)malloc(sz_in_bytes);
h_c = (double*)malloc(sz_in_bytes);
// Initiate values on h_a and h_b
for(int i = 0 ; i < N ; i++)
{
h_a[i] = 1./(1.+i);
h_b[i] = (i-1.)/(i+1.);
}
checkCudaErrors(hipMalloc((void**)&d_a, sz_in_bytes));
checkCudaErrors(hipMalloc((void**)&d_b, sz_in_bytes));
checkCudaErrors(hipMalloc((void**)&d_c, sz_in_bytes));
checkCudaErrors(hipMemcpy(d_a, h_a, sz_in_bytes, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_b, h_b, sz_in_bytes, hipMemcpyHostToDevice));
dim3 dimBlock(64, 1, 1);
dim3 dimGrid(10, 1, 1);
kernel<<<dimGrid , dimBlock>>>(d_a, d_b, d_c, N);
checkCudaErrors(hipMemcpy(h_c, d_c, sz_in_bytes, hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(d_a));
checkCudaErrors(hipFree(d_b));
checkCudaErrors(hipFree(d_c));
// Verifying
double err = 0, norm = 0;
for(int i = 0 ; i < N ; i++)
{
double err_loc = fabs(h_c[i] - (h_a[i]+h_b[i]));
err += err_loc;
norm += fabs(h_c[i]);
}
if (err/norm < 1.e-16)
{
printf("SUCCESS (Relative error : %.3e)\n", err/norm);
}
else
{
printf("ERROR (Relative error : %.3e)\n", err/norm);
}
free(h_a);
free(h_b);
free(h_c);
return 0;
} | .text
.file "err1.hip"
.globl _Z21__device_stub__kernelPdS_S_i # -- Begin function _Z21__device_stub__kernelPdS_S_i
.p2align 4, 0x90
.type _Z21__device_stub__kernelPdS_S_i,@function
_Z21__device_stub__kernelPdS_S_i: # @_Z21__device_stub__kernelPdS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6kernelPdS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z21__device_stub__kernelPdS_S_i, .Lfunc_end0-_Z21__device_stub__kernelPdS_S_i
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI1_0:
.quad 0x3ff0000000000000 # double 1
.LCPI1_1:
.quad 0xbff0000000000000 # double -1
.LCPI1_3:
.quad 0x3c9cd2b297d889bc # double 9.9999999999999997E-17
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0
.LCPI1_2:
.quad 0x7fffffffffffffff # double NaN
.quad 0x7fffffffffffffff # double NaN
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $144, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $8000, %edi # imm = 0x1F40
callq malloc
movq %rax, %rbx
movl $8000, %edi # imm = 0x1F40
callq malloc
movq %rax, %r14
movl $8000, %edi # imm = 0x1F40
callq malloc
movq %rax, %r15
xorl %eax, %eax
movsd .LCPI1_0(%rip), %xmm0 # xmm0 = mem[0],zero
movsd .LCPI1_1(%rip), %xmm1 # xmm1 = mem[0],zero
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
xorps %xmm2, %xmm2
cvtsi2sd %eax, %xmm2
movapd %xmm2, %xmm3
addsd %xmm0, %xmm3
movapd %xmm0, %xmm4
divsd %xmm3, %xmm4
movsd %xmm4, (%rbx,%rax,8)
addsd %xmm1, %xmm2
divsd %xmm3, %xmm2
movsd %xmm2, (%r14,%rax,8)
incq %rax
cmpq $1000, %rax # imm = 0x3E8
jne .LBB1_1
# %bb.2:
movq stderr(%rip), %r12
leaq 24(%rsp), %rdi
movl $8000, %esi # imm = 0x1F40
callq hipMalloc
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %edx
movq %r12, %rdi
movl $53, %ecx
movq %rax, %r8
xorl %eax, %eax
callq fprintf
movq stderr(%rip), %r12
leaq 16(%rsp), %rdi
movl $8000, %esi # imm = 0x1F40
callq hipMalloc
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %edx
movq %r12, %rdi
movl $54, %ecx
movq %rax, %r8
xorl %eax, %eax
callq fprintf
movq stderr(%rip), %r12
leaq 8(%rsp), %rdi
movl $8000, %esi # imm = 0x1F40
callq hipMalloc
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %edx
movq %r12, %rdi
movl $55, %ecx
movq %rax, %r8
xorl %eax, %eax
callq fprintf
movq stderr(%rip), %r12
movq 24(%rsp), %rdi
movl $8000, %edx # imm = 0x1F40
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %edx
movq %r12, %rdi
movl $57, %ecx
movq %rax, %r8
xorl %eax, %eax
callq fprintf
movq stderr(%rip), %r12
movq 16(%rsp), %rdi
movl $8000, %edx # imm = 0x1F40
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %edx
movq %r12, %rdi
movl $58, %ecx
movq %rax, %r8
xorl %eax, %eax
callq fprintf
movabsq $4294967306, %rdi # imm = 0x10000000A
leaq 54(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 24(%rsp), %rax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl $1000, 36(%rsp) # imm = 0x3E8
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 36(%rsp), %rax
movq %rax, 136(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z6kernelPdS_S_i, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
movq stderr(%rip), %r12
movq 8(%rsp), %rsi
movl $8000, %edx # imm = 0x1F40
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
movl %eax, %edi
callq hipGetErrorString
xorl %r13d, %r13d
movl $.L.str, %esi
movl $.L.str.1, %edx
movq %r12, %rdi
movl $64, %ecx
movq %rax, %r8
xorl %eax, %eax
callq fprintf
movq stderr(%rip), %r12
movq 24(%rsp), %rdi
callq hipFree
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %edx
movq %r12, %rdi
movl $66, %ecx
movq %rax, %r8
xorl %eax, %eax
callq fprintf
movq stderr(%rip), %r12
movq 16(%rsp), %rdi
callq hipFree
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %edx
movq %r12, %rdi
movl $67, %ecx
movq %rax, %r8
xorl %eax, %eax
callq fprintf
movq stderr(%rip), %r12
movq 8(%rsp), %rdi
callq hipFree
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %edx
movq %r12, %rdi
movl $68, %ecx
movq %rax, %r8
xorl %eax, %eax
callq fprintf
xorpd %xmm1, %xmm1
movapd .LCPI1_2(%rip), %xmm2 # xmm2 = [NaN,NaN]
xorpd %xmm0, %xmm0
.p2align 4, 0x90
.LBB1_5: # =>This Inner Loop Header: Depth=1
movsd (%r15,%r13,8), %xmm3 # xmm3 = mem[0],zero
movsd (%rbx,%r13,8), %xmm4 # xmm4 = mem[0],zero
addsd (%r14,%r13,8), %xmm4
movapd %xmm3, %xmm5
subsd %xmm4, %xmm5
andpd %xmm2, %xmm5
addsd %xmm5, %xmm0
andpd %xmm2, %xmm3
addsd %xmm3, %xmm1
incq %r13
cmpq $1000, %r13 # imm = 0x3E8
jne .LBB1_5
# %bb.6:
divsd %xmm1, %xmm0
movsd .LCPI1_3(%rip), %xmm1 # xmm1 = mem[0],zero
ucomisd %xmm0, %xmm1
movl $.L.str.2, %eax
movl $.L.str.3, %edi
cmovaq %rax, %rdi
movb $1, %al
callq printf
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
xorl %eax, %eax
addq $144, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6kernelPdS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6kernelPdS_S_i,@object # @_Z6kernelPdS_S_i
.section .rodata,"a",@progbits
.globl _Z6kernelPdS_S_i
.p2align 3, 0x0
_Z6kernelPdS_S_i:
.quad _Z21__device_stub__kernelPdS_S_i
.size _Z6kernelPdS_S_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "CUDA error at %s:%d (%s) \n"
.size .L.str, 27
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/pedromachuca/ProgGpu/master/ProgCuda/CODE/Ex2/err1.hip"
.size .L.str.1, 112
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "SUCCESS (Relative error : %.3e)\n"
.size .L.str.2, 33
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "ERROR (Relative error : %.3e)\n"
.size .L.str.3, 31
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z6kernelPdS_S_i"
.size .L__unnamed_1, 17
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__kernelPdS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6kernelPdS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z6kernelPdS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R15, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff0f7435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0060*/ IMAD.WIDE R4, R0, R15, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fc800078e020f */
/*0070*/ IMAD.WIDE R2, R0, R15, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fe200078e020f */
/*0080*/ LDG.E.64 R6, [R4.64] ; /* 0x0000000404067981 */
/* 0x000ea8000c1e1b00 */
/*0090*/ LDG.E.64 R8, [R2.64] ; /* 0x0000000402087981 */
/* 0x000ea2000c1e1b00 */
/*00a0*/ MOV R11, c[0x0][0x0] ; /* 0x00000000000b7a02 */
/* 0x000fca0000000f00 */
/*00b0*/ IMAD R11, R11, c[0x0][0xc], RZ ; /* 0x000003000b0b7a24 */
/* 0x000fca00078e02ff */
/*00c0*/ IADD3 R13, -R11, c[0x0][0x178], RZ ; /* 0x00005e000b0d7a10 */
/* 0x000fc80007ffe1ff */
/*00d0*/ ISETP.GE.AND P0, PT, R0.reuse, R13, PT ; /* 0x0000000d0000720c */
/* 0x040fe20003f06270 */
/*00e0*/ DADD R6, R6, R8 ; /* 0x0000000006067229 */
/* 0x0040640000000008 */
/*00f0*/ IMAD.WIDE R8, R0, R15, c[0x0][0x170] ; /* 0x00005c0000087625 */
/* 0x001fca00078e020f */
/*0100*/ STG.E.64 [R8.64], R6 ; /* 0x0000000608007986 */
/* 0x0021ea000c101b04 */
/*0110*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0120*/ IMAD.WIDE R4, R11, 0x8, R4 ; /* 0x000000080b047825 */
/* 0x000fc800078e0204 */
/*0130*/ IMAD.WIDE R2, R11.reuse, 0x8, R2 ; /* 0x000000080b027825 */
/* 0x040fe400078e0202 */
/*0140*/ LDG.E.64 R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1b00 */
/*0150*/ LDG.E.64 R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1b00 */
/*0160*/ IMAD.WIDE R8, R11, 0x8, R8 ; /* 0x000000080b087825 */
/* 0x001fe200078e0208 */
/*0170*/ DADD R6, R4, R2 ; /* 0x0000000004067229 */
/* 0x004e0e0000000002 */
/*0180*/ STG.E.64 [R8.64], R6 ; /* 0x0000000608007986 */
/* 0x001fe2000c101b04 */
/*0190*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01a0*/ BRA 0x1a0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6kernelPdS_S_i
.globl _Z6kernelPdS_S_i
.p2align 8
.type _Z6kernelPdS_S_i,@function
_Z6kernelPdS_S_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b128 s[4:7], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s8, s2, 0xffff
s_load_b64 s[2:3], s[0:1], 0x10
v_mad_u64_u32 v[1:2], null, s15, s8, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v2, 31, v1
v_lshlrev_b64 v[2:3], 3, v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v4, vcc_lo, s4, v2
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v3, vcc_lo
v_add_co_u32 v6, vcc_lo, s6, v2
v_add_co_ci_u32_e32 v7, vcc_lo, s7, v3, vcc_lo
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s2, v2
global_load_b64 v[4:5], v[4:5], off
global_load_b64 v[6:7], v[6:7], off
s_clause 0x1
s_load_b32 s9, s[0:1], 0x20
s_load_b32 s1, s[0:1], 0x18
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo
s_waitcnt lgkmcnt(0)
s_mul_i32 s0, s9, s8
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_sub_i32 s1, s1, s0
v_cmp_gt_i32_e32 vcc_lo, s1, v1
s_waitcnt vmcnt(0)
v_add_f64 v[4:5], v[4:5], v[6:7]
global_store_b64 v[2:3], v[4:5], off
s_and_saveexec_b32 s1, vcc_lo
s_cbranch_execz .LBB0_2
v_add_nc_u32_e32 v0, s0, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v1, 31, v0
v_lshlrev_b64 v[0:1], 3, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s4, v0
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s2, v0
global_load_b64 v[2:3], v[2:3], off
global_load_b64 v[4:5], v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_f64 v[2:3], v[2:3], v[4:5]
global_store_b64 v[0:1], v[2:3], off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6kernelPdS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6kernelPdS_S_i, .Lfunc_end0-_Z6kernelPdS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6kernelPdS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z6kernelPdS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00036a3e_00000000-6_err1.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z30__device_stub__Z6kernelPdS_S_iPdS_S_i
.type _Z30__device_stub__Z6kernelPdS_S_iPdS_S_i, @function
_Z30__device_stub__Z6kernelPdS_S_iPdS_S_i:
.LFB2082:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z6kernelPdS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z30__device_stub__Z6kernelPdS_S_iPdS_S_i, .-_Z30__device_stub__Z6kernelPdS_S_iPdS_S_i
.globl _Z6kernelPdS_S_i
.type _Z6kernelPdS_S_i, @function
_Z6kernelPdS_S_i:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z30__device_stub__Z6kernelPdS_S_iPdS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z6kernelPdS_S_i, .-_Z6kernelPdS_S_i
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "/home/ubuntu/Datasets/stackv2/train-structured/pedromachuca/ProgGpu/master/ProgCuda/CODE/Ex2/err1.cu"
.section .rodata.str1.1,"aMS",@progbits,1
.LC3:
.string "CUDA error at %s:%d (%s) \n"
.section .rodata.str1.8
.align 8
.LC6:
.string "SUCCESS (Relative error : %.3e)\n"
.align 8
.LC7:
.string "ERROR (Relative error : %.3e)\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $64, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl $8000, %edi
call malloc@PLT
movq %rax, %rbp
movl $8000, %edi
call malloc@PLT
movq %rax, %rbx
movl $8000, %edi
call malloc@PLT
movq %rax, %r12
movl $0, %eax
movsd .LC1(%rip), %xmm1
.L12:
pxor %xmm0, %xmm0
cvtsi2sdl %eax, %xmm0
movapd %xmm0, %xmm2
addsd %xmm1, %xmm2
movapd %xmm1, %xmm3
divsd %xmm2, %xmm3
movsd %xmm3, 0(%rbp,%rax,8)
subsd %xmm1, %xmm0
divsd %xmm2, %xmm0
movsd %xmm0, (%rbx,%rax,8)
addq $1, %rax
cmpq $1000, %rax
jne .L12
leaq 8(%rsp), %rdi
movl $8000, %esi
call cudaMalloc@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl $51, %r8d
leaq .LC2(%rip), %r14
movq %r14, %rcx
leaq .LC3(%rip), %r13
movq %r13, %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
leaq 16(%rsp), %rdi
movl $8000, %esi
call cudaMalloc@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl $52, %r8d
movq %r14, %rcx
movq %r13, %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
leaq 24(%rsp), %rdi
movl $8000, %esi
call cudaMalloc@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl $53, %r8d
movq %r14, %rcx
movq %r13, %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %ecx
movl $8000, %edx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl $55, %r8d
movq %r14, %rcx
movq %r13, %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %ecx
movl $8000, %edx
movq %rbx, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl $56, %r8d
movq %r14, %rcx
movq %r13, %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $64, 32(%rsp)
movl $1, 36(%rsp)
movl $10, 44(%rsp)
movl $1, 48(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 32(%rsp), %rdx
movl $1, %ecx
movq 44(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L24
.L13:
movl $2, %ecx
movl $8000, %edx
movq 24(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl $62, %r8d
leaq .LC2(%rip), %r14
movq %r14, %rcx
leaq .LC3(%rip), %r13
movq %r13, %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl $64, %r8d
movq %r14, %rcx
movq %r13, %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl $65, %r8d
movq %r14, %rcx
movq %r13, %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl $66, %r8d
movq %r14, %rcx
movq %r13, %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $0, %eax
pxor %xmm4, %xmm4
movapd %xmm4, %xmm0
movq .LC4(%rip), %xmm5
.L14:
movsd (%r12,%rax), %xmm1
movsd 0(%rbp,%rax), %xmm3
addsd (%rbx,%rax), %xmm3
movapd %xmm1, %xmm2
subsd %xmm3, %xmm2
andpd %xmm5, %xmm2
addsd %xmm2, %xmm0
andpd %xmm5, %xmm1
addsd %xmm1, %xmm4
addq $8, %rax
cmpq $8000, %rax
jne .L14
divsd %xmm4, %xmm0
movsd .LC5(%rip), %xmm1
comisd %xmm0, %xmm1
jbe .L22
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
.L17:
movq %rbp, %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L25
movl $0, %eax
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L24:
.cfi_restore_state
movl $1000, %ecx
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z30__device_stub__Z6kernelPdS_S_iPdS_S_i
jmp .L13
.L22:
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
jmp .L17
.L25:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC8:
.string "_Z6kernelPdS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _Z6kernelPdS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC1:
.long 0
.long 1072693248
.section .rodata.cst16,"aM",@progbits,16
.align 16
.LC4:
.long -1
.long 2147483647
.long 0
.long 0
.section .rodata.cst8
.align 8
.LC5:
.long -1747416644
.long 1016910514
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "err1.hip"
.globl _Z21__device_stub__kernelPdS_S_i # -- Begin function _Z21__device_stub__kernelPdS_S_i
.p2align 4, 0x90
.type _Z21__device_stub__kernelPdS_S_i,@function
_Z21__device_stub__kernelPdS_S_i: # @_Z21__device_stub__kernelPdS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6kernelPdS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z21__device_stub__kernelPdS_S_i, .Lfunc_end0-_Z21__device_stub__kernelPdS_S_i
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI1_0:
.quad 0x3ff0000000000000 # double 1
.LCPI1_1:
.quad 0xbff0000000000000 # double -1
.LCPI1_3:
.quad 0x3c9cd2b297d889bc # double 9.9999999999999997E-17
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0
.LCPI1_2:
.quad 0x7fffffffffffffff # double NaN
.quad 0x7fffffffffffffff # double NaN
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $144, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $8000, %edi # imm = 0x1F40
callq malloc
movq %rax, %rbx
movl $8000, %edi # imm = 0x1F40
callq malloc
movq %rax, %r14
movl $8000, %edi # imm = 0x1F40
callq malloc
movq %rax, %r15
xorl %eax, %eax
movsd .LCPI1_0(%rip), %xmm0 # xmm0 = mem[0],zero
movsd .LCPI1_1(%rip), %xmm1 # xmm1 = mem[0],zero
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
xorps %xmm2, %xmm2
cvtsi2sd %eax, %xmm2
movapd %xmm2, %xmm3
addsd %xmm0, %xmm3
movapd %xmm0, %xmm4
divsd %xmm3, %xmm4
movsd %xmm4, (%rbx,%rax,8)
addsd %xmm1, %xmm2
divsd %xmm3, %xmm2
movsd %xmm2, (%r14,%rax,8)
incq %rax
cmpq $1000, %rax # imm = 0x3E8
jne .LBB1_1
# %bb.2:
movq stderr(%rip), %r12
leaq 24(%rsp), %rdi
movl $8000, %esi # imm = 0x1F40
callq hipMalloc
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %edx
movq %r12, %rdi
movl $53, %ecx
movq %rax, %r8
xorl %eax, %eax
callq fprintf
movq stderr(%rip), %r12
leaq 16(%rsp), %rdi
movl $8000, %esi # imm = 0x1F40
callq hipMalloc
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %edx
movq %r12, %rdi
movl $54, %ecx
movq %rax, %r8
xorl %eax, %eax
callq fprintf
movq stderr(%rip), %r12
leaq 8(%rsp), %rdi
movl $8000, %esi # imm = 0x1F40
callq hipMalloc
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %edx
movq %r12, %rdi
movl $55, %ecx
movq %rax, %r8
xorl %eax, %eax
callq fprintf
movq stderr(%rip), %r12
movq 24(%rsp), %rdi
movl $8000, %edx # imm = 0x1F40
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %edx
movq %r12, %rdi
movl $57, %ecx
movq %rax, %r8
xorl %eax, %eax
callq fprintf
movq stderr(%rip), %r12
movq 16(%rsp), %rdi
movl $8000, %edx # imm = 0x1F40
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %edx
movq %r12, %rdi
movl $58, %ecx
movq %rax, %r8
xorl %eax, %eax
callq fprintf
movabsq $4294967306, %rdi # imm = 0x10000000A
leaq 54(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 24(%rsp), %rax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl $1000, 36(%rsp) # imm = 0x3E8
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 36(%rsp), %rax
movq %rax, 136(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z6kernelPdS_S_i, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
movq stderr(%rip), %r12
movq 8(%rsp), %rsi
movl $8000, %edx # imm = 0x1F40
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
movl %eax, %edi
callq hipGetErrorString
xorl %r13d, %r13d
movl $.L.str, %esi
movl $.L.str.1, %edx
movq %r12, %rdi
movl $64, %ecx
movq %rax, %r8
xorl %eax, %eax
callq fprintf
movq stderr(%rip), %r12
movq 24(%rsp), %rdi
callq hipFree
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %edx
movq %r12, %rdi
movl $66, %ecx
movq %rax, %r8
xorl %eax, %eax
callq fprintf
movq stderr(%rip), %r12
movq 16(%rsp), %rdi
callq hipFree
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %edx
movq %r12, %rdi
movl $67, %ecx
movq %rax, %r8
xorl %eax, %eax
callq fprintf
movq stderr(%rip), %r12
movq 8(%rsp), %rdi
callq hipFree
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %edx
movq %r12, %rdi
movl $68, %ecx
movq %rax, %r8
xorl %eax, %eax
callq fprintf
xorpd %xmm1, %xmm1
movapd .LCPI1_2(%rip), %xmm2 # xmm2 = [NaN,NaN]
xorpd %xmm0, %xmm0
.p2align 4, 0x90
.LBB1_5: # =>This Inner Loop Header: Depth=1
movsd (%r15,%r13,8), %xmm3 # xmm3 = mem[0],zero
movsd (%rbx,%r13,8), %xmm4 # xmm4 = mem[0],zero
addsd (%r14,%r13,8), %xmm4
movapd %xmm3, %xmm5
subsd %xmm4, %xmm5
andpd %xmm2, %xmm5
addsd %xmm5, %xmm0
andpd %xmm2, %xmm3
addsd %xmm3, %xmm1
incq %r13
cmpq $1000, %r13 # imm = 0x3E8
jne .LBB1_5
# %bb.6:
divsd %xmm1, %xmm0
movsd .LCPI1_3(%rip), %xmm1 # xmm1 = mem[0],zero
ucomisd %xmm0, %xmm1
movl $.L.str.2, %eax
movl $.L.str.3, %edi
cmovaq %rax, %rdi
movb $1, %al
callq printf
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
xorl %eax, %eax
addq $144, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6kernelPdS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6kernelPdS_S_i,@object # @_Z6kernelPdS_S_i
.section .rodata,"a",@progbits
.globl _Z6kernelPdS_S_i
.p2align 3, 0x0
_Z6kernelPdS_S_i:
.quad _Z21__device_stub__kernelPdS_S_i
.size _Z6kernelPdS_S_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "CUDA error at %s:%d (%s) \n"
.size .L.str, 27
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/pedromachuca/ProgGpu/master/ProgCuda/CODE/Ex2/err1.hip"
.size .L.str.1, 112
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "SUCCESS (Relative error : %.3e)\n"
.size .L.str.2, 33
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "ERROR (Relative error : %.3e)\n"
.size .L.str.3, 31
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z6kernelPdS_S_i"
.size .L__unnamed_1, 17
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__kernelPdS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6kernelPdS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <cuda_runtime.h>
__global__ void binarizeKernel(uchar4* pData, unsigned char threshold)
{
// get the position for the current thread
unsigned int x = (blockIdx.x * blockDim.x) + threadIdx.x;
unsigned int y = (blockIdx.y * blockDim.y) + threadIdx.y;
// calculate the memory adress
const unsigned int tid = y * (gridDim.x * blockDim.x) + x;
// get binarization result
unsigned char value = pData[tid].x > threshold ? 255 : 0;
// write the value back to the global memory
pData[tid].x = value;
pData[tid].y = value;
pData[tid].z = value;
}
void binarize(uchar4* pDataIn, uchar4* pDataOut, int width, int height, unsigned char threshold)
{
// allocate device memory
uchar4* pDevData;
unsigned int mem_size = sizeof(uchar4) * width * height;
cudaMalloc((void **) &pDevData, mem_size);
// copy results from host to device
cudaMemcpy(pDevData, pDataIn, mem_size, cudaMemcpyHostToDevice);
// define partitioning
dim3 threadsPerBlock(8, 8);
dim3 numBlocks(width / threadsPerBlock.x, height / threadsPerBlock.y);
// run the cuda kernel
binarizeKernel<<<numBlocks, threadsPerBlock>>>(pDevData, threshold);
// copy results from device to host
cudaMemcpy(pDataOut, pDevData, mem_size, cudaMemcpyDeviceToHost);
// cleanup memory
cudaFree(pDevData);
} | code for sm_80
Function : _Z14binarizeKernelP6uchar4h
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e220000002600 */
/*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0030*/ S2R R7, SR_TID.Y ; /* 0x0000000000077919 */
/* 0x000e280000002200 */
/*0040*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e680000002500 */
/*0050*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000ea20000002100 */
/*0060*/ IMAD R0, R0, c[0x0][0x4], R7 ; /* 0x0000010000007a24 */
/* 0x001fc800078e0207 */
/*0070*/ IMAD R0, R0, c[0x0][0xc], R3 ; /* 0x0000030000007a24 */
/* 0x002fe200078e0203 */
/*0080*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fc600000001ff */
/*0090*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x004fce00078e0205 */
/*00a0*/ IMAD.WIDE.U32 R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fca00078e0003 */
/*00b0*/ LDG.E.U8 R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea2000c1e1100 */
/*00c0*/ ULDC.U8 UR6, c[0x0][0x168] ; /* 0x00005a0000067ab9 */
/* 0x000fe40000000000 */
/*00d0*/ ISETP.GT.U32.AND P0, PT, R0, UR6, PT ; /* 0x0000000600007c0c */
/* 0x004fc8000bf04070 */
/*00e0*/ SEL R5, RZ, 0xffff, !P0 ; /* 0x0000ffffff057807 */
/* 0x000fc80004000000 */
/*00f0*/ PRMT R7, R5, 0x7604, R5 ; /* 0x0000760405077816 */
/* 0x000fe20000000005 */
/*0100*/ STG.E.U8 [R2.64+0x2], R5 ; /* 0x0000020502007986 */
/* 0x000fe8000c101104 */
/*0110*/ STG.E.U16 [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x000fe2000c101504 */
/*0120*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0130*/ BRA 0x130; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <cuda_runtime.h>
__global__ void binarizeKernel(uchar4* pData, unsigned char threshold)
{
// get the position for the current thread
unsigned int x = (blockIdx.x * blockDim.x) + threadIdx.x;
unsigned int y = (blockIdx.y * blockDim.y) + threadIdx.y;
// calculate the memory adress
const unsigned int tid = y * (gridDim.x * blockDim.x) + x;
// get binarization result
unsigned char value = pData[tid].x > threshold ? 255 : 0;
// write the value back to the global memory
pData[tid].x = value;
pData[tid].y = value;
pData[tid].z = value;
}
void binarize(uchar4* pDataIn, uchar4* pDataOut, int width, int height, unsigned char threshold)
{
// allocate device memory
uchar4* pDevData;
unsigned int mem_size = sizeof(uchar4) * width * height;
cudaMalloc((void **) &pDevData, mem_size);
// copy results from host to device
cudaMemcpy(pDevData, pDataIn, mem_size, cudaMemcpyHostToDevice);
// define partitioning
dim3 threadsPerBlock(8, 8);
dim3 numBlocks(width / threadsPerBlock.x, height / threadsPerBlock.y);
// run the cuda kernel
binarizeKernel<<<numBlocks, threadsPerBlock>>>(pDevData, threshold);
// copy results from device to host
cudaMemcpy(pDataOut, pDevData, mem_size, cudaMemcpyDeviceToHost);
// cleanup memory
cudaFree(pDevData);
} | .file "tmpxft_00013570_00000000-6_Binarize.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z41__device_stub__Z14binarizeKernelP6uchar4hP6uchar4h
.type _Z41__device_stub__Z14binarizeKernelP6uchar4hP6uchar4h, @function
_Z41__device_stub__Z14binarizeKernelP6uchar4hP6uchar4h:
.LFB2052:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movb %sil, 4(%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z14binarizeKernelP6uchar4h(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2052:
.size _Z41__device_stub__Z14binarizeKernelP6uchar4hP6uchar4h, .-_Z41__device_stub__Z14binarizeKernelP6uchar4hP6uchar4h
.globl _Z14binarizeKernelP6uchar4h
.type _Z14binarizeKernelP6uchar4h, @function
_Z14binarizeKernelP6uchar4h:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movzbl %sil, %esi
call _Z41__device_stub__Z14binarizeKernelP6uchar4hP6uchar4h
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _Z14binarizeKernelP6uchar4h, .-_Z14binarizeKernelP6uchar4h
.globl _Z8binarizeP6uchar4S0_iih
.type _Z8binarizeP6uchar4S0_iih, @function
_Z8binarizeP6uchar4S0_iih:
.LFB2027:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $56, %rsp
.cfi_def_cfa_offset 112
movq %rdi, %r15
movq %rsi, %r12
movl %edx, %ebp
movl %ecx, %ebx
movl %r8d, %r14d
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movl %edx, %eax
imull %ecx, %eax
leal 0(,%rax,4), %r13d
leaq 8(%rsp), %rdi
movq %r13, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %r13, %rdx
movq %r15, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
shrl $3, %ebp
movl %ebp, 28(%rsp)
shrl $3, %ebx
movl %ebx, 32(%rsp)
movl $8, 16(%rsp)
movl $8, 20(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 16(%rsp), %rdx
movl $1, %ecx
movq 28(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L15
.L12:
movl $2, %ecx
movq %r13, %rdx
movq 8(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
movzbl %r14b, %esi
movq 8(%rsp), %rdi
call _Z41__device_stub__Z14binarizeKernelP6uchar4hP6uchar4h
jmp .L12
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2027:
.size _Z8binarizeP6uchar4S0_iih, .-_Z8binarizeP6uchar4S0_iih
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z14binarizeKernelP6uchar4h"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z14binarizeKernelP6uchar4h(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <cuda_runtime.h>
__global__ void binarizeKernel(uchar4* pData, unsigned char threshold)
{
// get the position for the current thread
unsigned int x = (blockIdx.x * blockDim.x) + threadIdx.x;
unsigned int y = (blockIdx.y * blockDim.y) + threadIdx.y;
// calculate the memory adress
const unsigned int tid = y * (gridDim.x * blockDim.x) + x;
// get binarization result
unsigned char value = pData[tid].x > threshold ? 255 : 0;
// write the value back to the global memory
pData[tid].x = value;
pData[tid].y = value;
pData[tid].z = value;
}
void binarize(uchar4* pDataIn, uchar4* pDataOut, int width, int height, unsigned char threshold)
{
// allocate device memory
uchar4* pDevData;
unsigned int mem_size = sizeof(uchar4) * width * height;
cudaMalloc((void **) &pDevData, mem_size);
// copy results from host to device
cudaMemcpy(pDevData, pDataIn, mem_size, cudaMemcpyHostToDevice);
// define partitioning
dim3 threadsPerBlock(8, 8);
dim3 numBlocks(width / threadsPerBlock.x, height / threadsPerBlock.y);
// run the cuda kernel
binarizeKernel<<<numBlocks, threadsPerBlock>>>(pDevData, threshold);
// copy results from device to host
cudaMemcpy(pDataOut, pDevData, mem_size, cudaMemcpyDeviceToHost);
// cleanup memory
cudaFree(pDevData);
} | #include <hip/hip_runtime.h>
__global__ void binarizeKernel(uchar4* pData, unsigned char threshold)
{
// get the position for the current thread
unsigned int x = (blockIdx.x * blockDim.x) + threadIdx.x;
unsigned int y = (blockIdx.y * blockDim.y) + threadIdx.y;
// calculate the memory adress
const unsigned int tid = y * (gridDim.x * blockDim.x) + x;
// get binarization result
unsigned char value = pData[tid].x > threshold ? 255 : 0;
// write the value back to the global memory
pData[tid].x = value;
pData[tid].y = value;
pData[tid].z = value;
}
void binarize(uchar4* pDataIn, uchar4* pDataOut, int width, int height, unsigned char threshold)
{
// allocate device memory
uchar4* pDevData;
unsigned int mem_size = sizeof(uchar4) * width * height;
hipMalloc((void **) &pDevData, mem_size);
// copy results from host to device
hipMemcpy(pDevData, pDataIn, mem_size, hipMemcpyHostToDevice);
// define partitioning
dim3 threadsPerBlock(8, 8);
dim3 numBlocks(width / threadsPerBlock.x, height / threadsPerBlock.y);
// run the cuda kernel
binarizeKernel<<<numBlocks, threadsPerBlock>>>(pDevData, threshold);
// copy results from device to host
hipMemcpy(pDataOut, pDevData, mem_size, hipMemcpyDeviceToHost);
// cleanup memory
hipFree(pDevData);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
__global__ void binarizeKernel(uchar4* pData, unsigned char threshold)
{
// get the position for the current thread
unsigned int x = (blockIdx.x * blockDim.x) + threadIdx.x;
unsigned int y = (blockIdx.y * blockDim.y) + threadIdx.y;
// calculate the memory adress
const unsigned int tid = y * (gridDim.x * blockDim.x) + x;
// get binarization result
unsigned char value = pData[tid].x > threshold ? 255 : 0;
// write the value back to the global memory
pData[tid].x = value;
pData[tid].y = value;
pData[tid].z = value;
}
void binarize(uchar4* pDataIn, uchar4* pDataOut, int width, int height, unsigned char threshold)
{
// allocate device memory
uchar4* pDevData;
unsigned int mem_size = sizeof(uchar4) * width * height;
hipMalloc((void **) &pDevData, mem_size);
// copy results from host to device
hipMemcpy(pDevData, pDataIn, mem_size, hipMemcpyHostToDevice);
// define partitioning
dim3 threadsPerBlock(8, 8);
dim3 numBlocks(width / threadsPerBlock.x, height / threadsPerBlock.y);
// run the cuda kernel
binarizeKernel<<<numBlocks, threadsPerBlock>>>(pDevData, threshold);
// copy results from device to host
hipMemcpy(pDataOut, pDevData, mem_size, hipMemcpyDeviceToHost);
// cleanup memory
hipFree(pDevData);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z14binarizeKernelP15HIP_vector_typeIhLj4EEh
.globl _Z14binarizeKernelP15HIP_vector_typeIhLj4EEh
.p2align 8
.type _Z14binarizeKernelP15HIP_vector_typeIhLj4EEh,@function
_Z14binarizeKernelP15HIP_vector_typeIhLj4EEh:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x1c
s_load_b32 s2, s[0:1], 0x10
v_bfe_u32 v1, v0, 10, 10
v_and_b32_e32 v0, 0x3ff, v0
s_waitcnt lgkmcnt(0)
s_lshr_b32 s3, s4, 16
s_and_b32 s4, s4, 0xffff
v_mad_u64_u32 v[2:3], null, s15, s3, v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[3:4], null, v2, s2, s[14:15]
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x0
s_load_b32 s0, s[0:1], 0x8
v_mad_u64_u32 v[1:2], null, v3, s4, v[0:1]
v_mov_b32_e32 v2, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
v_and_b32_e64 v3, 0xff, s0
global_load_u8 v2, v[0:1], off
s_waitcnt vmcnt(0)
v_cmp_gt_u16_e32 vcc_lo, v2, v3
v_cndmask_b32_e64 v2, 0, -1, vcc_lo
s_clause 0x2
global_store_b8 v[0:1], v2, off
global_store_b8 v[0:1], v2, off offset:1
global_store_b8 v[0:1], v2, off offset:2
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z14binarizeKernelP15HIP_vector_typeIhLj4EEh
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z14binarizeKernelP15HIP_vector_typeIhLj4EEh, .Lfunc_end0-_Z14binarizeKernelP15HIP_vector_typeIhLj4EEh
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 1
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z14binarizeKernelP15HIP_vector_typeIhLj4EEh
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z14binarizeKernelP15HIP_vector_typeIhLj4EEh.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
__global__ void binarizeKernel(uchar4* pData, unsigned char threshold)
{
// get the position for the current thread
unsigned int x = (blockIdx.x * blockDim.x) + threadIdx.x;
unsigned int y = (blockIdx.y * blockDim.y) + threadIdx.y;
// calculate the memory adress
const unsigned int tid = y * (gridDim.x * blockDim.x) + x;
// get binarization result
unsigned char value = pData[tid].x > threshold ? 255 : 0;
// write the value back to the global memory
pData[tid].x = value;
pData[tid].y = value;
pData[tid].z = value;
}
// Binarizes a width x height uchar4 image on the GPU: uploads pDataIn, runs
// binarizeKernel with 8x8 thread blocks, and downloads the result into
// pDataOut.
//
// NOTE(review): the grid size uses truncating division, so width and height
// are presumably expected to be multiples of 8 — pixels in a partial tile
// would be skipped. HIP API return codes are not checked. TODO confirm both.
void binarize(uchar4* pDataIn, uchar4* pDataOut, int width, int height, unsigned char threshold)
{
    // Size of the pixel buffer in bytes.
    unsigned int byteCount = sizeof(uchar4) * width * height;

    // Device-side copy of the image.
    uchar4* devPixels;
    hipMalloc((void**)&devPixels, byteCount);
    hipMemcpy(devPixels, pDataIn, byteCount, hipMemcpyHostToDevice);

    // 8x8 threads per block, one thread per pixel.
    dim3 block(8, 8);
    dim3 grid(width / block.x, height / block.y);
    binarizeKernel<<<grid, block>>>(devPixels, threshold);

    // Fetch the binarized image back and release the device buffer.
    hipMemcpy(pDataOut, devPixels, byteCount, hipMemcpyDeviceToHost);
    hipFree(devPixels);
}
.file "Binarize.hip"
.globl _Z29__device_stub__binarizeKernelP15HIP_vector_typeIhLj4EEh # -- Begin function _Z29__device_stub__binarizeKernelP15HIP_vector_typeIhLj4EEh
.p2align 4, 0x90
.type _Z29__device_stub__binarizeKernelP15HIP_vector_typeIhLj4EEh,@function
_Z29__device_stub__binarizeKernelP15HIP_vector_typeIhLj4EEh: # @_Z29__device_stub__binarizeKernelP15HIP_vector_typeIhLj4EEh
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movb %sil, 7(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 7(%rsp), %rax
movq %rax, 72(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z14binarizeKernelP15HIP_vector_typeIhLj4EEh, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z29__device_stub__binarizeKernelP15HIP_vector_typeIhLj4EEh, .Lfunc_end0-_Z29__device_stub__binarizeKernelP15HIP_vector_typeIhLj4EEh
.cfi_endproc
# -- End function
.globl _Z8binarizeP15HIP_vector_typeIhLj4EES1_iih # -- Begin function _Z8binarizeP15HIP_vector_typeIhLj4EES1_iih
.p2align 4, 0x90
.type _Z8binarizeP15HIP_vector_typeIhLj4EES1_iih,@function
_Z8binarizeP15HIP_vector_typeIhLj4EES1_iih: # @_Z8binarizeP15HIP_vector_typeIhLj4EES1_iih
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $104, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %r8d, %ebp
movl %ecx, %r15d
movl %edx, %r12d
movq %rsi, %r14
movq %rdi, %r13
movl %edx, %ebx
imull %ecx, %ebx
shll $2, %ebx
leaq 16(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
movq 16(%rsp), %rdi
movq %r13, %rsi
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
shrl $3, %r12d
shrl $3, %r15d
shlq $32, %r15
orq %r12, %r15
movabsq $34359738376, %rdx # imm = 0x800000008
movq %r15, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 16(%rsp), %rax
movq %rax, 72(%rsp)
movb %bpl, 15(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 15(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z14binarizeKernelP15HIP_vector_typeIhLj4EEh, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
movq 16(%rsp), %rsi
movq %r14, %rdi
movq %rbx, %rdx
movl $2, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
callq hipFree
addq $104, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z8binarizeP15HIP_vector_typeIhLj4EES1_iih, .Lfunc_end1-_Z8binarizeP15HIP_vector_typeIhLj4EES1_iih
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z14binarizeKernelP15HIP_vector_typeIhLj4EEh, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z14binarizeKernelP15HIP_vector_typeIhLj4EEh,@object # @_Z14binarizeKernelP15HIP_vector_typeIhLj4EEh
.section .rodata,"a",@progbits
.globl _Z14binarizeKernelP15HIP_vector_typeIhLj4EEh
.p2align 3, 0x0
_Z14binarizeKernelP15HIP_vector_typeIhLj4EEh:
.quad _Z29__device_stub__binarizeKernelP15HIP_vector_typeIhLj4EEh
.size _Z14binarizeKernelP15HIP_vector_typeIhLj4EEh, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z14binarizeKernelP15HIP_vector_typeIhLj4EEh"
.size .L__unnamed_1, 45
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z29__device_stub__binarizeKernelP15HIP_vector_typeIhLj4EEh
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z14binarizeKernelP15HIP_vector_typeIhLj4EEh
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z14binarizeKernelP6uchar4h
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e220000002600 */
/*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0030*/ S2R R7, SR_TID.Y ; /* 0x0000000000077919 */
/* 0x000e280000002200 */
/*0040*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e680000002500 */
/*0050*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000ea20000002100 */
/*0060*/ IMAD R0, R0, c[0x0][0x4], R7 ; /* 0x0000010000007a24 */
/* 0x001fc800078e0207 */
/*0070*/ IMAD R0, R0, c[0x0][0xc], R3 ; /* 0x0000030000007a24 */
/* 0x002fe200078e0203 */
/*0080*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fc600000001ff */
/*0090*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x004fce00078e0205 */
/*00a0*/ IMAD.WIDE.U32 R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fca00078e0003 */
/*00b0*/ LDG.E.U8 R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea2000c1e1100 */
/*00c0*/ ULDC.U8 UR6, c[0x0][0x168] ; /* 0x00005a0000067ab9 */
/* 0x000fe40000000000 */
/*00d0*/ ISETP.GT.U32.AND P0, PT, R0, UR6, PT ; /* 0x0000000600007c0c */
/* 0x004fc8000bf04070 */
/*00e0*/ SEL R5, RZ, 0xffff, !P0 ; /* 0x0000ffffff057807 */
/* 0x000fc80004000000 */
/*00f0*/ PRMT R7, R5, 0x7604, R5 ; /* 0x0000760405077816 */
/* 0x000fe20000000005 */
/*0100*/ STG.E.U8 [R2.64+0x2], R5 ; /* 0x0000020502007986 */
/* 0x000fe8000c101104 */
/*0110*/ STG.E.U16 [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x000fe2000c101504 */
/*0120*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0130*/ BRA 0x130; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z14binarizeKernelP15HIP_vector_typeIhLj4EEh
.globl _Z14binarizeKernelP15HIP_vector_typeIhLj4EEh
.p2align 8
.type _Z14binarizeKernelP15HIP_vector_typeIhLj4EEh,@function
_Z14binarizeKernelP15HIP_vector_typeIhLj4EEh:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x1c
s_load_b32 s2, s[0:1], 0x10
v_bfe_u32 v1, v0, 10, 10
v_and_b32_e32 v0, 0x3ff, v0
s_waitcnt lgkmcnt(0)
s_lshr_b32 s3, s4, 16
s_and_b32 s4, s4, 0xffff
v_mad_u64_u32 v[2:3], null, s15, s3, v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[3:4], null, v2, s2, s[14:15]
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x0
s_load_b32 s0, s[0:1], 0x8
v_mad_u64_u32 v[1:2], null, v3, s4, v[0:1]
v_mov_b32_e32 v2, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
v_and_b32_e64 v3, 0xff, s0
global_load_u8 v2, v[0:1], off
s_waitcnt vmcnt(0)
v_cmp_gt_u16_e32 vcc_lo, v2, v3
v_cndmask_b32_e64 v2, 0, -1, vcc_lo
s_clause 0x2
global_store_b8 v[0:1], v2, off
global_store_b8 v[0:1], v2, off offset:1
global_store_b8 v[0:1], v2, off offset:2
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z14binarizeKernelP15HIP_vector_typeIhLj4EEh
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z14binarizeKernelP15HIP_vector_typeIhLj4EEh, .Lfunc_end0-_Z14binarizeKernelP15HIP_vector_typeIhLj4EEh
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 1
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z14binarizeKernelP15HIP_vector_typeIhLj4EEh
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z14binarizeKernelP15HIP_vector_typeIhLj4EEh.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00013570_00000000-6_Binarize.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z41__device_stub__Z14binarizeKernelP6uchar4hP6uchar4h
.type _Z41__device_stub__Z14binarizeKernelP6uchar4hP6uchar4h, @function
_Z41__device_stub__Z14binarizeKernelP6uchar4hP6uchar4h:
.LFB2052:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movb %sil, 4(%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z14binarizeKernelP6uchar4h(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2052:
.size _Z41__device_stub__Z14binarizeKernelP6uchar4hP6uchar4h, .-_Z41__device_stub__Z14binarizeKernelP6uchar4hP6uchar4h
.globl _Z14binarizeKernelP6uchar4h
.type _Z14binarizeKernelP6uchar4h, @function
_Z14binarizeKernelP6uchar4h:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movzbl %sil, %esi
call _Z41__device_stub__Z14binarizeKernelP6uchar4hP6uchar4h
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _Z14binarizeKernelP6uchar4h, .-_Z14binarizeKernelP6uchar4h
.globl _Z8binarizeP6uchar4S0_iih
.type _Z8binarizeP6uchar4S0_iih, @function
_Z8binarizeP6uchar4S0_iih:
.LFB2027:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $56, %rsp
.cfi_def_cfa_offset 112
movq %rdi, %r15
movq %rsi, %r12
movl %edx, %ebp
movl %ecx, %ebx
movl %r8d, %r14d
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movl %edx, %eax
imull %ecx, %eax
leal 0(,%rax,4), %r13d
leaq 8(%rsp), %rdi
movq %r13, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %r13, %rdx
movq %r15, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
shrl $3, %ebp
movl %ebp, 28(%rsp)
shrl $3, %ebx
movl %ebx, 32(%rsp)
movl $8, 16(%rsp)
movl $8, 20(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 16(%rsp), %rdx
movl $1, %ecx
movq 28(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L15
.L12:
movl $2, %ecx
movq %r13, %rdx
movq 8(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
movzbl %r14b, %esi
movq 8(%rsp), %rdi
call _Z41__device_stub__Z14binarizeKernelP6uchar4hP6uchar4h
jmp .L12
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2027:
.size _Z8binarizeP6uchar4S0_iih, .-_Z8binarizeP6uchar4S0_iih
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z14binarizeKernelP6uchar4h"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z14binarizeKernelP6uchar4h(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "Binarize.hip"
.globl _Z29__device_stub__binarizeKernelP15HIP_vector_typeIhLj4EEh # -- Begin function _Z29__device_stub__binarizeKernelP15HIP_vector_typeIhLj4EEh
.p2align 4, 0x90
.type _Z29__device_stub__binarizeKernelP15HIP_vector_typeIhLj4EEh,@function
_Z29__device_stub__binarizeKernelP15HIP_vector_typeIhLj4EEh: # @_Z29__device_stub__binarizeKernelP15HIP_vector_typeIhLj4EEh
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movb %sil, 7(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 7(%rsp), %rax
movq %rax, 72(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z14binarizeKernelP15HIP_vector_typeIhLj4EEh, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z29__device_stub__binarizeKernelP15HIP_vector_typeIhLj4EEh, .Lfunc_end0-_Z29__device_stub__binarizeKernelP15HIP_vector_typeIhLj4EEh
.cfi_endproc
# -- End function
.globl _Z8binarizeP15HIP_vector_typeIhLj4EES1_iih # -- Begin function _Z8binarizeP15HIP_vector_typeIhLj4EES1_iih
.p2align 4, 0x90
.type _Z8binarizeP15HIP_vector_typeIhLj4EES1_iih,@function
_Z8binarizeP15HIP_vector_typeIhLj4EES1_iih: # @_Z8binarizeP15HIP_vector_typeIhLj4EES1_iih
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $104, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %r8d, %ebp
movl %ecx, %r15d
movl %edx, %r12d
movq %rsi, %r14
movq %rdi, %r13
movl %edx, %ebx
imull %ecx, %ebx
shll $2, %ebx
leaq 16(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
movq 16(%rsp), %rdi
movq %r13, %rsi
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
shrl $3, %r12d
shrl $3, %r15d
shlq $32, %r15
orq %r12, %r15
movabsq $34359738376, %rdx # imm = 0x800000008
movq %r15, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 16(%rsp), %rax
movq %rax, 72(%rsp)
movb %bpl, 15(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 15(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z14binarizeKernelP15HIP_vector_typeIhLj4EEh, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
movq 16(%rsp), %rsi
movq %r14, %rdi
movq %rbx, %rdx
movl $2, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
callq hipFree
addq $104, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z8binarizeP15HIP_vector_typeIhLj4EES1_iih, .Lfunc_end1-_Z8binarizeP15HIP_vector_typeIhLj4EES1_iih
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z14binarizeKernelP15HIP_vector_typeIhLj4EEh, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z14binarizeKernelP15HIP_vector_typeIhLj4EEh,@object # @_Z14binarizeKernelP15HIP_vector_typeIhLj4EEh
.section .rodata,"a",@progbits
.globl _Z14binarizeKernelP15HIP_vector_typeIhLj4EEh
.p2align 3, 0x0
_Z14binarizeKernelP15HIP_vector_typeIhLj4EEh:
.quad _Z29__device_stub__binarizeKernelP15HIP_vector_typeIhLj4EEh
.size _Z14binarizeKernelP15HIP_vector_typeIhLj4EEh, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z14binarizeKernelP15HIP_vector_typeIhLj4EEh"
.size .L__unnamed_1, 45
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z29__device_stub__binarizeKernelP15HIP_vector_typeIhLj4EEh
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z14binarizeKernelP15HIP_vector_typeIhLj4EEh
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /*
* EzUpdater.cpp
*
* Created on: 25 янв. 2016 г.
* Author: aleksandr
*/
#include "EzUpdater.h"
#include "SmartIndex.h"
// indx - index along the right or left edge, running over y from firstY to lastY
__host__ __device__
void EzUpdater::operator() (const int indx) {
    /* Correct Ez adjacent to the TFSF boundary: subtract the incident-field
     * contribution along the left edge (firstX) and add it along the right
     * edge (lastX), using the 1-D auxiliary field Hy1D. */
    // Float literal: S * 377.0 would promote the product to double, which is
    // needlessly slow in device code; 377.0f keeps the math single-precision.
    const float Cezh = S * 377.0f;

    // Left edge: remove the incident Hy just outside the boundary.
    int m = firstX;
    Ez(m, indx) = Ez(m, indx) - Cezh * Hy1D[m - 1];

    // Right edge: add the incident Hy on the boundary itself.
    m = lastX;
    Ez(m, indx) = Ez(m, indx) + Cezh * Hy1D[m];
}
} | code for sm_80
Function : _ZN3cub17CUB_200700_800_NS11EmptyKernelIvEEvv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /*
* EzUpdater.cpp
*
* Created on: 25 янв. 2016 г.
* Author: aleksandr
*/
#include "EzUpdater.h"
#include "SmartIndex.h"
// indx - index along the right or left edge, running over y from firstY to lastY
__host__ __device__
void EzUpdater::operator() (const int indx) {
    /* Correct Ez adjacent to the TFSF boundary: subtract the incident-field
     * contribution along the left edge (firstX) and add it along the right
     * edge (lastX), using the 1-D auxiliary field Hy1D. */
    // Float literal: S * 377.0 would promote the product to double, which is
    // needlessly slow in device code; 377.0f keeps the math single-precision.
    const float Cezh = S * 377.0f;

    // Left edge: remove the incident Hy just outside the boundary.
    int m = firstX;
    Ez(m, indx) = Ez(m, indx) - Cezh * Hy1D[m - 1];

    // Right edge: add the incident Hy on the boundary itself.
    m = lastX;
    Ez(m, indx) = Ez(m, indx) + Cezh * Hy1D[m];
}
} | /*
* EzUpdater.cpp
*
* Created on: 25 янв. 2016 г.
* Author: aleksandr
*/
#include <hip/hip_runtime.h>
#include "EzUpdater.h"
#include "SmartIndex.h"
// indx - index along the right or left edge, running over y from firstY to lastY
__host__ __device__
void EzUpdater::operator() (const int indx) {
    /* Correct Ez adjacent to the TFSF boundary: subtract the incident-field
     * contribution along the left edge (firstX) and add it along the right
     * edge (lastX), using the 1-D auxiliary field Hy1D. */
    // Float literal: S * 377.0 would promote the product to double, which is
    // needlessly slow in device code; 377.0f keeps the math single-precision.
    const float Cezh = S * 377.0f;

    // Left edge: remove the incident Hy just outside the boundary.
    int m = firstX;
    Ez(m, indx) = Ez(m, indx) - Cezh * Hy1D[m - 1];

    // Right edge: add the incident Hy on the boundary itself.
    m = lastX;
    Ez(m, indx) = Ez(m, indx) + Cezh * Hy1D[m];
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /*
* EzUpdater.cpp
*
* Created on: 25 янв. 2016 г.
* Author: aleksandr
*/
#include <hip/hip_runtime.h>
#include "EzUpdater.h"
#include "SmartIndex.h"
// indx - index along the right or left edge, running over y from firstY to lastY
__host__ __device__
void EzUpdater::operator() (const int indx) {
    /* Correct Ez adjacent to the TFSF boundary: subtract the incident-field
     * contribution along the left edge (firstX) and add it along the right
     * edge (lastX), using the 1-D auxiliary field Hy1D. */
    // Float literal: S * 377.0 would promote the product to double, which is
    // needlessly slow in device code; 377.0f keeps the math single-precision.
    const float Cezh = S * 377.0f;

    // Left edge: remove the incident Hy just outside the boundary.
    int m = firstX;
    Ez(m, indx) = Ez(m, indx) - Cezh * Hy1D[m - 1];

    // Right edge: add the incident Hy on the boundary itself.
    m = lastX;
    Ez(m, indx) = Ez(m, indx) + Cezh * Hy1D[m];
}
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _ZN3cub17CUB_200700_800_NS11EmptyKernelIvEEvv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
// Element-wise sum c[i] = a[i] + b[i] over a buffer of `length` ints.
// The linear index is built from a 2-D launch as xIdx * width + yIdx; the
// `length` guard makes over-provisioned grids safe (out-of-range threads
// simply exit).
__global__ void vecMatSum(int *a, int *b, int *c, int width, int length){
	// Global 2-D coordinates of this thread.
	int xIdx = blockDim.x * blockIdx.x + threadIdx.x;
	int yIdx = blockDim.y * blockIdx.y + threadIdx.y;
	// Flatten to a linear element index.
	int idx = xIdx * width + yIdx;
	if (idx >= length)
		return;
	c[idx] = a[idx] + b[idx];
}
int main(int argc, char* argv[]){
//initialization code
int width,size,threads,blocks,totalSize;
float total_time;
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
if(argv[2])
width = atoi(argv[2]);
else
width = 300;
size = width*width;
if(argv[1])
threads = atoi(argv[1]);
else
threads = 16;
dim3 ThreadsInBlock(threads,threads); //will provide threads * threads threads
blocks = (int) sqrt((float) size / (float) (threads*threads));
dim3 BlockDim(blocks,blocks);
while(BlockDim.x*BlockDim.y*threads*threads < size)
BlockDim.y += 1;
totalSize = size*sizeof(int);
printf("\n%ix%i blocks of %ix%i threads = %i threads total \n",BlockDim.x,BlockDim.y,ThreadsInBlock.x,ThreadsInBlock.y,BlockDim.x*BlockDim.y*ThreadsInBlock.x*ThreadsInBlock.y);
//end init
//start mallocs
int *a,*dev_a,*b,*dev_b,*c,*dev_c;
cudaMalloc((void**)&dev_a,totalSize);
cudaMalloc((void**)&dev_b,totalSize);
cudaMalloc((void**)&dev_c,totalSize);
a = (int*) malloc(totalSize);
b = (int*) malloc(totalSize);
c = (int*) malloc(totalSize);
//end mallocs
//problem specific
int idx;
for(idx=0;idx<size;idx++){
a[idx] = idx;
b[idx] = idx*2;
}
//copy to dev
cudaMemcpy(dev_a,a,totalSize,cudaMemcpyHostToDevice);
cudaMemcpy(dev_b,b,totalSize,cudaMemcpyHostToDevice);
//end copy
int iteration = 0;
float avg_time = 0;
for(iteration=0;iteration<10;iteration++){
//call kernel and measure times
cudaEventRecord(start,0);
vecMatSum<<<BlockDim,ThreadsInBlock>>>(dev_a,dev_b,dev_c,width,size);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&total_time,start,stop);
printf("\n time for %i blocks of %i threads : %f \n",blocks,threads,total_time);
avg_time+=total_time;
}
avg_time/=10.0;
printf("average time for %ix%i matrix sum is %f ",width,width,avg_time);
//copy back and prints
cudaMemcpy(c,dev_c,totalSize,cudaMemcpyDeviceToHost);
for(idx=0;idx<size;idx+=size/5)
printf("\n a[%i]=%i\n",idx,c[idx]);
//free
free(a);
free(b);
free(c);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
} | code for sm_80
Function : _Z9vecMatSumPiS_S_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e280000002100 */
/*0030*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */
/* 0x000e680000002600 */
/*0040*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */
/* 0x000e620000002200 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fc400078e0203 */
/*0060*/ IMAD R3, R2, c[0x0][0x4], R5 ; /* 0x0000010002037a24 */
/* 0x002fc800078e0205 */
/*0070*/ IMAD R0, R0, c[0x0][0x178], R3 ; /* 0x00005e0000007a24 */
/* 0x000fca00078e0203 */
/*0080*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x17c], PT ; /* 0x00005f0000007a0c */
/* 0x000fda0003f06270 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*00b0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*00c0*/ IMAD.WIDE R4, R0, R7, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fc800078e0207 */
/*00d0*/ IMAD.WIDE R2, R0.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x0c0fe400078e0207 */
/*00e0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00f0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*0100*/ IMAD.WIDE R6, R0, R7, c[0x0][0x170] ; /* 0x00005c0000067625 */
/* 0x000fe200078e0207 */
/*0110*/ IADD3 R9, R4, R3, RZ ; /* 0x0000000304097210 */
/* 0x004fca0007ffe0ff */
/*0120*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*0130*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0140*/ BRA 0x140; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
__global__ void vecMatSum(int *a, int *b, int *c, int width, int length){
int row = blockIdx.x*blockDim.x + threadIdx.x;
int col = blockIdx.y*blockDim.y + threadIdx.y;
int tid = row*width+col;
if(tid < length)
c[tid] = a[tid] + b[tid];
}
int main(int argc, char* argv[]){
//initialization code
int width,size,threads,blocks,totalSize;
float total_time;
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
if(argv[2])
width = atoi(argv[2]);
else
width = 300;
size = width*width;
if(argv[1])
threads = atoi(argv[1]);
else
threads = 16;
dim3 ThreadsInBlock(threads,threads); //will provide threads * threads threads
blocks = (int) sqrt((float) size / (float) (threads*threads));
dim3 BlockDim(blocks,blocks);
while(BlockDim.x*BlockDim.y*threads*threads < size)
BlockDim.y += 1;
totalSize = size*sizeof(int);
printf("\n%ix%i blocks of %ix%i threads = %i threads total \n",BlockDim.x,BlockDim.y,ThreadsInBlock.x,ThreadsInBlock.y,BlockDim.x*BlockDim.y*ThreadsInBlock.x*ThreadsInBlock.y);
//end init
//start mallocs
int *a,*dev_a,*b,*dev_b,*c,*dev_c;
cudaMalloc((void**)&dev_a,totalSize);
cudaMalloc((void**)&dev_b,totalSize);
cudaMalloc((void**)&dev_c,totalSize);
a = (int*) malloc(totalSize);
b = (int*) malloc(totalSize);
c = (int*) malloc(totalSize);
//end mallocs
//problem specific
int idx;
for(idx=0;idx<size;idx++){
a[idx] = idx;
b[idx] = idx*2;
}
//copy to dev
cudaMemcpy(dev_a,a,totalSize,cudaMemcpyHostToDevice);
cudaMemcpy(dev_b,b,totalSize,cudaMemcpyHostToDevice);
//end copy
int iteration = 0;
float avg_time = 0;
for(iteration=0;iteration<10;iteration++){
//call kernel and measure times
cudaEventRecord(start,0);
vecMatSum<<<BlockDim,ThreadsInBlock>>>(dev_a,dev_b,dev_c,width,size);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&total_time,start,stop);
printf("\n time for %i blocks of %i threads : %f \n",blocks,threads,total_time);
avg_time+=total_time;
}
avg_time/=10.0;
printf("average time for %ix%i matrix sum is %f ",width,width,avg_time);
//copy back and prints
cudaMemcpy(c,dev_c,totalSize,cudaMemcpyDeviceToHost);
for(idx=0;idx<size;idx+=size/5)
printf("\n a[%i]=%i\n",idx,c[idx]);
//free
free(a);
free(b);
free(c);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
} | .file "tmpxft_000bf282_00000000-6_CUDA_P6_MatrixSum.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z34__device_stub__Z9vecMatSumPiS_S_iiPiS_S_ii
.type _Z34__device_stub__Z9vecMatSumPiS_S_iiPiS_S_ii, @function
_Z34__device_stub__Z9vecMatSumPiS_S_iiPiS_S_ii:
.LFB2082:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9vecMatSumPiS_S_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z34__device_stub__Z9vecMatSumPiS_S_iiPiS_S_ii, .-_Z34__device_stub__Z9vecMatSumPiS_S_iiPiS_S_ii
.globl _Z9vecMatSumPiS_S_ii
.type _Z9vecMatSumPiS_S_ii, @function
_Z9vecMatSumPiS_S_ii:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z34__device_stub__Z9vecMatSumPiS_S_iiPiS_S_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z9vecMatSumPiS_S_ii, .-_Z9vecMatSumPiS_S_ii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "\n%ix%i blocks of %ix%i threads = %i threads total \n"
.align 8
.LC3:
.string "\n time for %i blocks of %i threads : %f \n"
.align 8
.LC5:
.string "average time for %ix%i matrix sum is %f "
.section .rodata.str1.1,"aMS",@progbits,1
.LC6:
.string "\n a[%i]=%i\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $136, %rsp
.cfi_def_cfa_offset 192
movq %rsi, %rbx
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 56(%rsp), %rdi
call cudaEventCreate@PLT
leaq 64(%rsp), %rdi
call cudaEventCreate@PLT
movq 16(%rbx), %rdi
testq %rdi, %rdi
je .L12
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %r14
movl %eax, 20(%rsp)
imull %eax, %r14d
movq 8(%rbx), %rdi
movl $16, %r15d
testq %rdi, %rdi
je .L13
.L27:
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, %r15d
.L13:
movl %r15d, %r13d
movl $1, 104(%rsp)
pxor %xmm0, %xmm0
cvtsi2ssl %r14d, %xmm0
movl %r15d, %eax
imull %r15d, %eax
pxor %xmm1, %xmm1
cvtsi2ssl %eax, %xmm1
divss %xmm1, %xmm0
pxor %xmm1, %xmm1
ucomiss %xmm0, %xmm1
ja .L38
.L26:
sqrtss %xmm0, %xmm0
.L16:
cvttss2sil %xmm0, %ebp
movl $1, 116(%rsp)
movl %ebp, %eax
imull %r13d, %eax
imull %eax, %eax
movl %r14d, %r9d
cmpl %r14d, %eax
jnb .L17
movl %r13d, %edx
imull %r13d, %edx
leal 1(%rbp), %eax
movl %edx, %esi
imull %ebp, %esi
movl %eax, %ecx
imull %esi, %ecx
movl %ebp, %r8d
imull %ebp, %r8d
imull %edx, %r8d
movl $0, %edx
.L18:
movl %eax, %r12d
addl $1, %eax
movl %ecx, %r10d
addl %esi, %ecx
addl %esi, %edx
leal (%rdx,%r8), %edi
cmpl %r9d, %edi
jb .L18
movslq %r14d, %rbx
subq $8, %rsp
.cfi_def_cfa_offset 200
pushq %r10
.cfi_def_cfa_offset 208
movl %r13d, %r9d
movl %r13d, %r8d
movl %r12d, %ecx
movl %ebp, %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leal 0(,%r14,4), %eax
cltq
addq $16, %rsp
.cfi_def_cfa_offset 192
leaq 72(%rsp), %rdi
movq %rax, 8(%rsp)
movq %rax, %rsi
call cudaMalloc@PLT
leaq 80(%rsp), %rdi
movq 8(%rsp), %rsi
call cudaMalloc@PLT
leaq 88(%rsp), %rdi
movq 8(%rsp), %rsi
call cudaMalloc@PLT
movq 8(%rsp), %rdi
call malloc@PLT
movq %rax, 24(%rsp)
movq 8(%rsp), %rdi
call malloc@PLT
movq %rax, 32(%rsp)
movq 8(%rsp), %rdi
call malloc@PLT
movq %rax, 40(%rsp)
.L29:
movl $0, %eax
movq 24(%rsp), %rcx
movq 32(%rsp), %rsi
.L21:
movl %eax, (%rcx,%rax,4)
leal (%rax,%rax), %edx
movl %edx, (%rsi,%rax,4)
addq $1, %rax
cmpq %rbx, %rax
jne .L21
.L20:
movl $1, %ecx
movq 8(%rsp), %rbx
movq %rbx, %rdx
movq 24(%rsp), %rsi
movq 72(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %rbx, %rdx
movq 32(%rsp), %rsi
movq 80(%rsp), %rdi
call cudaMemcpy@PLT
movl $10, %ebx
movl $0x00000000, 16(%rsp)
jmp .L23
.L38:
call sqrtf@PLT
jmp .L16
.L17:
movslq %r14d, %rbx
subq $8, %rsp
.cfi_def_cfa_offset 200
pushq %rax
.cfi_def_cfa_offset 208
movl %r13d, %r9d
movl %r13d, %r8d
movl %ebp, %ecx
movl %ebp, %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leal 0(,%r14,4), %eax
movslq %eax, %r12
movq %r12, 24(%rsp)
addq $16, %rsp
.cfi_def_cfa_offset 192
leaq 72(%rsp), %rdi
movq %r12, %rsi
call cudaMalloc@PLT
leaq 80(%rsp), %rdi
movq %r12, %rsi
call cudaMalloc@PLT
leaq 88(%rsp), %rdi
movq %r12, %rsi
call cudaMalloc@PLT
movq %r12, %rdi
call malloc@PLT
movq %rax, 24(%rsp)
movq %r12, %rdi
call malloc@PLT
movq %rax, 32(%rsp)
movq %r12, %rdi
call malloc@PLT
movq %rax, 40(%rsp)
movl %ebp, %r12d
testl %r14d, %r14d
jle .L20
jmp .L29
.L22:
movl $0, %esi
movq 64(%rsp), %rdi
call cudaEventRecord@PLT
movq 64(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 52(%rsp), %rdi
movq 64(%rsp), %rdx
movq 56(%rsp), %rsi
call cudaEventElapsedTime@PLT
pxor %xmm0, %xmm0
cvtss2sd 52(%rsp), %xmm0
movl %r15d, %ecx
movl %ebp, %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movss 16(%rsp), %xmm2
addss 52(%rsp), %xmm2
movss %xmm2, 16(%rsp)
subl $1, %ebx
je .L43
.L23:
movl $0, %esi
movq 56(%rsp), %rdi
call cudaEventRecord@PLT
movl %ebp, 108(%rsp)
movl %r12d, 112(%rsp)
movl %r13d, 96(%rsp)
movl %r13d, 100(%rsp)
movl 104(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 96(%rsp), %rdx
movq 108(%rsp), %rdi
movl 116(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L22
movl %r14d, %r8d
movl 20(%rsp), %ecx
movq 88(%rsp), %rdx
movq 80(%rsp), %rsi
movq 72(%rsp), %rdi
call _Z34__device_stub__Z9vecMatSumPiS_S_iiPiS_S_ii
jmp .L22
.L43:
movaps %xmm2, %xmm0
divss .LC4(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
movl 20(%rsp), %edx
movl %edx, %ecx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl $2, %ecx
movq 8(%rsp), %rdx
movq 88(%rsp), %rsi
movq 40(%rsp), %rbp
movq %rbp, %rdi
call cudaMemcpy@PLT
testl %r14d, %r14d
jle .L24
movslq %r14d, %r15
imulq $1717986919, %r15, %r15
sarq $33, %r15
movl %r14d, %eax
sarl $31, %eax
subl %eax, %r15d
movslq %r15d, %r12
salq $2, %r12
movl $0, %ebx
leaq .LC6(%rip), %r13
.L25:
movl 0(%rbp), %ecx
movl %ebx, %edx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl %r15d, %ebx
addq %r12, %rbp
cmpl %r14d, %ebx
jl .L25
.L24:
movq 24(%rsp), %rdi
call free@PLT
movq 32(%rsp), %rdi
call free@PLT
movq 40(%rsp), %rdi
call free@PLT
movq 72(%rsp), %rdi
call cudaFree@PLT
movq 80(%rsp), %rdi
call cudaFree@PLT
movq 88(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rdi
call cudaEventDestroy@PLT
movq 64(%rsp), %rdi
call cudaEventDestroy@PLT
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L44
movl $0, %eax
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L45:
.cfi_restore_state
movl $1, 104(%rsp)
movl $90000, %r14d
movl $16, %r15d
movl $300, 20(%rsp)
movss .LC0(%rip), %xmm0
movl $16, %r13d
jmp .L26
.L12:
movq 8(%rbx), %rdi
testq %rdi, %rdi
je .L45
movl $90000, %r14d
movl $300, 20(%rsp)
jmp .L27
.L44:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC7:
.string "_Z9vecMatSumPiS_S_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq _Z9vecMatSumPiS_S_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1135593472
.align 4
.LC4:
.long 1092616192
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
__global__ void vecMatSum(int *a, int *b, int *c, int width, int length){
int row = blockIdx.x*blockDim.x + threadIdx.x;
int col = blockIdx.y*blockDim.y + threadIdx.y;
int tid = row*width+col;
if(tid < length)
c[tid] = a[tid] + b[tid];
}
int main(int argc, char* argv[]){
//initialization code
int width,size,threads,blocks,totalSize;
float total_time;
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
if(argv[2])
width = atoi(argv[2]);
else
width = 300;
size = width*width;
if(argv[1])
threads = atoi(argv[1]);
else
threads = 16;
dim3 ThreadsInBlock(threads,threads); //will provide threads * threads threads
blocks = (int) sqrt((float) size / (float) (threads*threads));
dim3 BlockDim(blocks,blocks);
while(BlockDim.x*BlockDim.y*threads*threads < size)
BlockDim.y += 1;
totalSize = size*sizeof(int);
printf("\n%ix%i blocks of %ix%i threads = %i threads total \n",BlockDim.x,BlockDim.y,ThreadsInBlock.x,ThreadsInBlock.y,BlockDim.x*BlockDim.y*ThreadsInBlock.x*ThreadsInBlock.y);
//end init
//start mallocs
int *a,*dev_a,*b,*dev_b,*c,*dev_c;
cudaMalloc((void**)&dev_a,totalSize);
cudaMalloc((void**)&dev_b,totalSize);
cudaMalloc((void**)&dev_c,totalSize);
a = (int*) malloc(totalSize);
b = (int*) malloc(totalSize);
c = (int*) malloc(totalSize);
//end mallocs
//problem specific
int idx;
for(idx=0;idx<size;idx++){
a[idx] = idx;
b[idx] = idx*2;
}
//copy to dev
cudaMemcpy(dev_a,a,totalSize,cudaMemcpyHostToDevice);
cudaMemcpy(dev_b,b,totalSize,cudaMemcpyHostToDevice);
//end copy
int iteration = 0;
float avg_time = 0;
for(iteration=0;iteration<10;iteration++){
//call kernel and measure times
cudaEventRecord(start,0);
vecMatSum<<<BlockDim,ThreadsInBlock>>>(dev_a,dev_b,dev_c,width,size);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&total_time,start,stop);
printf("\n time for %i blocks of %i threads : %f \n",blocks,threads,total_time);
avg_time+=total_time;
}
avg_time/=10.0;
printf("average time for %ix%i matrix sum is %f ",width,width,avg_time);
//copy back and prints
cudaMemcpy(c,dev_c,totalSize,cudaMemcpyDeviceToHost);
for(idx=0;idx<size;idx+=size/5)
printf("\n a[%i]=%i\n",idx,c[idx]);
//free
free(a);
free(b);
free(c);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void vecMatSum(int *a, int *b, int *c, int width, int length){
int row = blockIdx.x*blockDim.x + threadIdx.x;
int col = blockIdx.y*blockDim.y + threadIdx.y;
int tid = row*width+col;
if(tid < length)
c[tid] = a[tid] + b[tid];
}
int main(int argc, char* argv[]){
//initialization code
int width,size,threads,blocks,totalSize;
float total_time;
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
if(argv[2])
width = atoi(argv[2]);
else
width = 300;
size = width*width;
if(argv[1])
threads = atoi(argv[1]);
else
threads = 16;
dim3 ThreadsInBlock(threads,threads); //will provide threads * threads threads
blocks = (int) sqrt((float) size / (float) (threads*threads));
dim3 BlockDim(blocks,blocks);
while(BlockDim.x*BlockDim.y*threads*threads < size)
BlockDim.y += 1;
totalSize = size*sizeof(int);
printf("\n%ix%i blocks of %ix%i threads = %i threads total \n",BlockDim.x,BlockDim.y,ThreadsInBlock.x,ThreadsInBlock.y,BlockDim.x*BlockDim.y*ThreadsInBlock.x*ThreadsInBlock.y);
//end init
//start mallocs
int *a,*dev_a,*b,*dev_b,*c,*dev_c;
hipMalloc((void**)&dev_a,totalSize);
hipMalloc((void**)&dev_b,totalSize);
hipMalloc((void**)&dev_c,totalSize);
a = (int*) malloc(totalSize);
b = (int*) malloc(totalSize);
c = (int*) malloc(totalSize);
//end mallocs
//problem specific
int idx;
for(idx=0;idx<size;idx++){
a[idx] = idx;
b[idx] = idx*2;
}
//copy to dev
hipMemcpy(dev_a,a,totalSize,hipMemcpyHostToDevice);
hipMemcpy(dev_b,b,totalSize,hipMemcpyHostToDevice);
//end copy
int iteration = 0;
float avg_time = 0;
for(iteration=0;iteration<10;iteration++){
//call kernel and measure times
hipEventRecord(start,0);
vecMatSum<<<BlockDim,ThreadsInBlock>>>(dev_a,dev_b,dev_c,width,size);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&total_time,start,stop);
printf("\n time for %i blocks of %i threads : %f \n",blocks,threads,total_time);
avg_time+=total_time;
}
avg_time/=10.0;
printf("average time for %ix%i matrix sum is %f ",width,width,avg_time);
//copy back and prints
hipMemcpy(c,dev_c,totalSize,hipMemcpyDeviceToHost);
for(idx=0;idx<size;idx+=size/5)
printf("\n a[%i]=%i\n",idx,c[idx]);
//free
free(a);
free(b);
free(c);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
hipEventDestroy(start);
hipEventDestroy(stop);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void vecMatSum(int *a, int *b, int *c, int width, int length){
int row = blockIdx.x*blockDim.x + threadIdx.x;
int col = blockIdx.y*blockDim.y + threadIdx.y;
int tid = row*width+col;
if(tid < length)
c[tid] = a[tid] + b[tid];
}
int main(int argc, char* argv[]){
//initialization code
int width,size,threads,blocks,totalSize;
float total_time;
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
if(argv[2])
width = atoi(argv[2]);
else
width = 300;
size = width*width;
if(argv[1])
threads = atoi(argv[1]);
else
threads = 16;
dim3 ThreadsInBlock(threads,threads); //will provide threads * threads threads
blocks = (int) sqrt((float) size / (float) (threads*threads));
dim3 BlockDim(blocks,blocks);
while(BlockDim.x*BlockDim.y*threads*threads < size)
BlockDim.y += 1;
totalSize = size*sizeof(int);
printf("\n%ix%i blocks of %ix%i threads = %i threads total \n",BlockDim.x,BlockDim.y,ThreadsInBlock.x,ThreadsInBlock.y,BlockDim.x*BlockDim.y*ThreadsInBlock.x*ThreadsInBlock.y);
//end init
//start mallocs
int *a,*dev_a,*b,*dev_b,*c,*dev_c;
hipMalloc((void**)&dev_a,totalSize);
hipMalloc((void**)&dev_b,totalSize);
hipMalloc((void**)&dev_c,totalSize);
a = (int*) malloc(totalSize);
b = (int*) malloc(totalSize);
c = (int*) malloc(totalSize);
//end mallocs
//problem specific
int idx;
for(idx=0;idx<size;idx++){
a[idx] = idx;
b[idx] = idx*2;
}
//copy to dev
hipMemcpy(dev_a,a,totalSize,hipMemcpyHostToDevice);
hipMemcpy(dev_b,b,totalSize,hipMemcpyHostToDevice);
//end copy
int iteration = 0;
float avg_time = 0;
for(iteration=0;iteration<10;iteration++){
//call kernel and measure times
hipEventRecord(start,0);
vecMatSum<<<BlockDim,ThreadsInBlock>>>(dev_a,dev_b,dev_c,width,size);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&total_time,start,stop);
printf("\n time for %i blocks of %i threads : %f \n",blocks,threads,total_time);
avg_time+=total_time;
}
avg_time/=10.0;
printf("average time for %ix%i matrix sum is %f ",width,width,avg_time);
//copy back and prints
hipMemcpy(c,dev_c,totalSize,hipMemcpyDeviceToHost);
for(idx=0;idx<size;idx+=size/5)
printf("\n a[%i]=%i\n",idx,c[idx]);
//free
free(a);
free(b);
free(c);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
hipEventDestroy(start);
hipEventDestroy(stop);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9vecMatSumPiS_S_ii
.globl _Z9vecMatSumPiS_S_ii
.p2align 8
.type _Z9vecMatSumPiS_S_ii,@function
_Z9vecMatSumPiS_S_ii:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x2c
s_load_b64 s[2:3], s[0:1], 0x18
v_and_b32_e32 v1, 0x3ff, v0
v_bfe_u32 v0, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s5, s4, 0xffff
s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
v_mad_u64_u32 v[2:3], null, s14, s5, v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_mul_lo_u32 v1, v2, s2
s_lshr_b32 s2, s4, 16
s_mul_i32 s15, s15, s2
s_mov_b32 s2, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add3_u32 v0, s15, v0, v1
v_cmpx_gt_i32_e64 s3, v0
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v1, 31, v0
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9vecMatSumPiS_S_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9vecMatSumPiS_S_ii, .Lfunc_end0-_Z9vecMatSumPiS_S_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9vecMatSumPiS_S_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9vecMatSumPiS_S_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void vecMatSum(int *a, int *b, int *c, int width, int length){
int row = blockIdx.x*blockDim.x + threadIdx.x;
int col = blockIdx.y*blockDim.y + threadIdx.y;
int tid = row*width+col;
if(tid < length)
c[tid] = a[tid] + b[tid];
}
int main(int argc, char* argv[]){
    // Usage: prog [threads_per_block_edge] [matrix_width]
    // Times 10 launches of vecMatSum on a width x width int matrix and
    // reports per-launch and average kernel times plus a few sampled results.
    int width,size,threads,blocks,totalSize;
    float total_time;
    hipEvent_t start,stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    // Check argc before touching argv[i]: argv[argc] is guaranteed NULL,
    // but indexing past it (e.g. argv[2] when argc==1) is undefined behavior.
    if(argc > 2 && argv[2])
        width = atoi(argv[2]);
    else
        width = 300;
    size = width*width;
    if(argc > 1 && argv[1])
        threads = atoi(argv[1]);
    else
        threads = 16;
    dim3 ThreadsInBlock(threads,threads); //will provide threads * threads threads
    // Start from a square grid; the sqrt truncation may undershoot, so the
    // loop below grows the grid until it covers every element.
    blocks = (int) sqrt((float) size / (float) (threads*threads));
    dim3 BlockDim(blocks,blocks);
    while(BlockDim.x*BlockDim.y*threads*threads < size)
        BlockDim.y += 1;
    totalSize = size*sizeof(int);
    printf("\n%ix%i blocks of %ix%i threads = %i threads total \n",BlockDim.x,BlockDim.y,ThreadsInBlock.x,ThreadsInBlock.y,BlockDim.x*BlockDim.y*ThreadsInBlock.x*ThreadsInBlock.y);
    //end init
    // Device and host buffers. NOTE(review): hipMalloc/malloc results are
    // unchecked; consider wrapping in an error-checking macro.
    int *a,*dev_a,*b,*dev_b,*c,*dev_c;
    hipMalloc((void**)&dev_a,totalSize);
    hipMalloc((void**)&dev_b,totalSize);
    hipMalloc((void**)&dev_c,totalSize);
    a = (int*) malloc(totalSize);
    b = (int*) malloc(totalSize);
    c = (int*) malloc(totalSize);
    //end mallocs
    // Deterministic inputs: a[i]=i, b[i]=2i, so the expected result is c[i]=3i.
    int idx;
    for(idx=0;idx<size;idx++){
        a[idx] = idx;
        b[idx] = idx*2;
    }
    //copy to dev
    hipMemcpy(dev_a,a,totalSize,hipMemcpyHostToDevice);
    hipMemcpy(dev_b,b,totalSize,hipMemcpyHostToDevice);
    //end copy
    // Time 10 launches with events; hipEventSynchronize blocks until the stop
    // event completes, so each elapsed time covers exactly one launch.
    int iteration = 0;
    float avg_time = 0;
    for(iteration=0;iteration<10;iteration++){
        hipEventRecord(start,0);
        vecMatSum<<<BlockDim,ThreadsInBlock>>>(dev_a,dev_b,dev_c,width,size);
        hipEventRecord(stop,0);
        hipEventSynchronize(stop);
        hipEventElapsedTime(&total_time,start,stop);
        printf("\n time for %i blocks of %i threads : %f \n",blocks,threads,total_time);
        avg_time+=total_time;
    }
    avg_time/=10.0f;  // float literal avoids an accidental double round-trip
    printf("average time for %ix%i matrix sum is %f ",width,width,avg_time);
    //copy back and prints
    hipMemcpy(c,dev_c,totalSize,hipMemcpyDeviceToHost);
    // Sample ~5 results. Guard the stride: size/5 is 0 for size < 5, which
    // would have made the original loop (idx += 0) spin forever. Also label
    // the value as c[] — it is the computed sum, not input a[].
    int stride = (size/5 > 0) ? size/5 : 1;
    for(idx=0;idx<size;idx+=stride)
        printf("\n c[%i]=%i\n",idx,c[idx]);
    //free
    free(a);
    free(b);
    free(c);
    hipFree(dev_a);
    hipFree(dev_b);
    hipFree(dev_c);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    return 0;
}
.file "CUDA_P6_MatrixSum.hip"
.globl _Z24__device_stub__vecMatSumPiS_S_ii # -- Begin function _Z24__device_stub__vecMatSumPiS_S_ii
.p2align 4, 0x90
.type _Z24__device_stub__vecMatSumPiS_S_ii,@function
_Z24__device_stub__vecMatSumPiS_S_ii: # @_Z24__device_stub__vecMatSumPiS_S_ii
	.cfi_startproc
# %bb.0:
# Host-side HIP kernel stub emitted by clang. It spills the five kernel
# arguments (int* a, int* b, int* c, int width, int length) to the stack,
# builds the void* argument array hipLaunchKernel expects, pops the launch
# configuration that the <<<...>>> expansion pushed, and forwards everything
# to hipLaunchKernel with the kernel handle symbol as the function key.
	subq $120, %rsp
	.cfi_def_cfa_offset 128
# Spill incoming SysV arguments (rdi/rsi/rdx pointers, ecx/r8d ints).
	movq %rdi, 72(%rsp)
	movq %rsi, 64(%rsp)
	movq %rdx, 56(%rsp)
	movl %ecx, 4(%rsp)
	movl %r8d, (%rsp)
# Build the kernel-argument pointer array at 80(%rsp): one slot per argument.
	leaq 72(%rsp), %rax
	movq %rax, 80(%rsp)
	leaq 64(%rsp), %rax
	movq %rax, 88(%rsp)
	leaq 56(%rsp), %rax
	movq %rax, 96(%rsp)
	leaq 4(%rsp), %rax
	movq %rax, 104(%rsp)
	movq %rsp, %rax
	movq %rax, 112(%rsp)
# Retrieve grid dim, block dim, shared-mem size, and stream pushed by the
# launch expression.
	leaq 40(%rsp), %rdi
	leaq 24(%rsp), %rsi
	leaq 16(%rsp), %rdx
	leaq 8(%rsp), %rcx
	callq __hipPopCallConfiguration
	movq 40(%rsp), %rsi
	movl 48(%rsp), %edx
	movq 24(%rsp), %rcx
	movl 32(%rsp), %r8d
	leaq 80(%rsp), %r9
	movl $_Z9vecMatSumPiS_S_ii, %edi
# Shared-mem size and stream go on the stack (7th/8th arguments).
	pushq 8(%rsp)
	.cfi_adjust_cfa_offset 8
	pushq 24(%rsp)
	.cfi_adjust_cfa_offset 8
	callq hipLaunchKernel
	addq $136, %rsp
	.cfi_adjust_cfa_offset -136
	retq
.Lfunc_end0:
	.size _Z24__device_stub__vecMatSumPiS_S_ii, .Lfunc_end0-_Z24__device_stub__vecMatSumPiS_S_ii
	.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI1_0:
.long 0x41200000 # float 10
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $232, %rsp
.cfi_def_cfa_offset 288
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %rbx
leaq 72(%rsp), %rdi
callq hipEventCreate
leaq 24(%rsp), %rdi
callq hipEventCreate
movq 16(%rbx), %rdi
testq %rdi, %rdi
je .LBB1_1
# %bb.2:
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
jmp .LBB1_3
.LBB1_1:
movl $300, %eax # imm = 0x12C
.LBB1_3:
movq %rax, 32(%rsp) # 8-byte Spill
movl %eax, %r8d
imull %r8d, %r8d
movq 8(%rbx), %rdi
testq %rdi, %rdi
movq %r8, (%rsp) # 8-byte Spill
je .LBB1_4
# %bb.5:
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq (%rsp), %r8 # 8-byte Reload
movq %rax, %r14
jmp .LBB1_6
.LBB1_4:
movl $16, %r14d
.LBB1_6:
cvtsi2ss %r8d, %xmm0
movabsq $4294967297, %rbp # imm = 0x100000001
movl %r14d, %r15d
movl %r14d, %ebx
imull %r14d, %ebx
cvtsi2ss %ebx, %xmm1
divss %xmm1, %xmm0
cvtss2sd %xmm0, %xmm0
xorps %xmm1, %xmm1
ucomisd %xmm1, %xmm0
jb .LBB1_8
# %bb.7:
sqrtsd %xmm0, %xmm0
jmp .LBB1_9
.LBB1_8: # %call.sqrt
callq sqrt
movq (%rsp), %r8 # 8-byte Reload
.LBB1_9: # %.split
cvttsd2si %xmm0, %r12d
movq %r12, %r13
imulq %rbp, %r13
movl %r12d, %eax
imull %r13d, %eax
movl %ebx, %ecx
imull %eax, %ecx
cmpl %r8d, %ecx
jae .LBB1_10
# %bb.11: # %.lr.ph.preheader
movabsq $-4294967296, %rcx # imm = 0xFFFFFFFF00000000
.p2align 4, 0x90
.LBB1_12: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl %r13d, %esi
andq %rcx, %r13
movq %rbp, %rax
addq %r13, %rax
leaq -1(,%r13), %rdx
addq %rbp, %rdx
movl %esi, %edi
leaq (%rdi,%rax), %r13
decq %r13
shrq $32, %rdx
movl %edx, %eax
imull %esi, %eax
movl %ebx, %edi
imull %eax, %edi
cmpl %r8d, %edi
jb .LBB1_12
jmp .LBB1_13
.LBB1_10:
movl %r13d, %esi
movl %r12d, %edx
.LBB1_13: # %._crit_edge
movq %r15, %rcx
shlq $32, %rcx
movq %rcx, 40(%rsp) # 8-byte Spill
leal (,%r8,4), %ecx
movl %ecx, 16(%rsp) # 4-byte Spill
imull %eax, %ebx
xorl %ebp, %ebp
movl $.L.str, %edi
# kill: def $edx killed $edx killed $rdx
movl %r14d, %ecx
movq %r14, 96(%rsp) # 8-byte Spill
movl %r14d, %r8d
movl %ebx, %r9d
xorl %eax, %eax
callq printf
movslq 16(%rsp), %r14 # 4-byte Folded Reload
leaq 64(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
leaq 56(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
leaq 48(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
movq %r14, %rdi
callq malloc
movq %rax, %rbx
movq %r14, %rdi
callq malloc
movq %rax, 16(%rsp) # 8-byte Spill
movq %r14, 88(%rsp) # 8-byte Spill
movq %r14, %rdi
callq malloc
movq 16(%rsp), %rdx # 8-byte Reload
movq %rax, 104(%rsp) # 8-byte Spill
cmpl $0, 32(%rsp) # 4-byte Folded Reload
je .LBB1_16
# %bb.14: # %.lr.ph90.preheader
movq (%rsp), %rax # 8-byte Reload
cmpl $1, %eax
# kill: def $eax killed $eax killed $rax def $rax
adcl $0, %eax
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB1_15: # %.lr.ph90
# =>This Inner Loop Header: Depth=1
movl %ecx, (%rbx,%rcx,4)
movl %ebp, (%rdx,%rcx,4)
incq %rcx
addl $2, %ebp
cmpq %rcx, %rax
jne .LBB1_15
.LBB1_16: # %._crit_edge91
addq 40(%rsp), %r15 # 8-byte Folded Reload
movq %rdx, %rbp
movq 64(%rsp), %rdi
movq %rbx, 112(%rsp) # 8-byte Spill
movq %rbx, %rsi
movq 88(%rsp), %rbx # 8-byte Reload
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
movq 56(%rsp), %rdi
movq %rbp, %rsi
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
xorps %xmm0, %xmm0
movl $10, %ebp
leaq 12(%rsp), %rbx
movq 96(%rsp), %r14 # 8-byte Reload
jmp .LBB1_17
.p2align 4, 0x90
.LBB1_19: # in Loop: Header=BB1_17 Depth=1
movq 24(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 24(%rsp), %rdi
callq hipEventSynchronize
movq 72(%rsp), %rsi
movq 24(%rsp), %rdx
movq %rbx, %rdi
callq hipEventElapsedTime
movss 12(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.1, %edi
movl %r12d, %esi
movl %r14d, %edx
movb $1, %al
callq printf
movss 40(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
addss 12(%rsp), %xmm0
decl %ebp
je .LBB1_20
.LBB1_17: # =>This Inner Loop Header: Depth=1
movss %xmm0, 40(%rsp) # 4-byte Spill
movq 72(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq %r13, %rdi
movl $1, %esi
movq %r15, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_19
# %bb.18: # in Loop: Header=BB1_17 Depth=1
movq 64(%rsp), %rax
movq 56(%rsp), %rcx
movq 48(%rsp), %rdx
movq %rax, 184(%rsp)
movq %rcx, 176(%rsp)
movq %rdx, 168(%rsp)
movq 32(%rsp), %rax # 8-byte Reload
movl %eax, 84(%rsp)
movq (%rsp), %rax # 8-byte Reload
movl %eax, 80(%rsp)
leaq 184(%rsp), %rax
movq %rax, 192(%rsp)
leaq 176(%rsp), %rax
movq %rax, 200(%rsp)
leaq 168(%rsp), %rax
movq %rax, 208(%rsp)
leaq 84(%rsp), %rax
movq %rax, 216(%rsp)
leaq 80(%rsp), %rax
movq %rax, 224(%rsp)
leaq 152(%rsp), %rdi
leaq 136(%rsp), %rsi
leaq 128(%rsp), %rdx
leaq 120(%rsp), %rcx
callq __hipPopCallConfiguration
movq 152(%rsp), %rsi
movl 160(%rsp), %edx
movq 136(%rsp), %rcx
movl 144(%rsp), %r8d
movl $_Z9vecMatSumPiS_S_ii, %edi
leaq 192(%rsp), %r9
pushq 120(%rsp)
.cfi_adjust_cfa_offset 8
pushq 136(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB1_19
.LBB1_20:
divss .LCPI1_0(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
movl $.L.str.2, %edi
movq 32(%rsp), %rbx # 8-byte Reload
movl %ebx, %esi
movl %ebx, %edx
movb $1, %al
callq printf
movq 48(%rsp), %rsi
movq 104(%rsp), %r12 # 8-byte Reload
movq %r12, %rdi
movq 88(%rsp), %rdx # 8-byte Reload
movl $2, %ecx
callq hipMemcpy
testl %ebx, %ebx
je .LBB1_23
# %bb.21: # %.lr.ph96
movl (%rsp), %r14d # 4-byte Reload
movl $3435973837, %r15d # imm = 0xCCCCCCCD
imulq %r14, %r15
shrq $34, %r15
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB1_22: # =>This Inner Loop Header: Depth=1
movl (%r12,%rbx,4), %edx
movl $.L.str.3, %edi
movl %ebx, %esi
xorl %eax, %eax
callq printf
addq %r15, %rbx
cmpq %r14, %rbx
jb .LBB1_22
.LBB1_23: # %._crit_edge97
movq 112(%rsp), %rdi # 8-byte Reload
callq free
movq 16(%rsp), %rdi # 8-byte Reload
callq free
movq %r12, %rdi
callq free
movq 64(%rsp), %rdi
callq hipFree
movq 56(%rsp), %rdi
callq hipFree
movq 48(%rsp), %rdi
callq hipFree
movq 72(%rsp), %rdi
callq hipEventDestroy
movq 24(%rsp), %rdi
callq hipEventDestroy
xorl %eax, %eax
addq $232, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
	.cfi_startproc
# %bb.0:
# Module constructor (runs via .init_array): registers the embedded fat
# binary with the HIP runtime exactly once (guarded by __hip_gpubin_handle),
# registers the vecMatSum kernel symbol, and arranges for the matching
# destructor to run at exit.
	subq $40, %rsp
	.cfi_def_cfa_offset 48
	cmpq $0, __hip_gpubin_handle(%rip)
	jne .LBB2_2
# %bb.1:
	movl $__hip_fatbin_wrapper, %edi
	callq __hipRegisterFatBinary
	movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
	movq __hip_gpubin_handle(%rip), %rdi
# Zero the 32 bytes of unused dim3* / size_t* slots passed to
# __hipRegisterFunction via the stack.
	xorps %xmm0, %xmm0
	movups %xmm0, 16(%rsp)
	movups %xmm0, (%rsp)
	movl $_Z9vecMatSumPiS_S_ii, %esi
	movl $.L__unnamed_1, %edx
	movl $.L__unnamed_1, %ecx
	movl $-1, %r8d
	xorl %r9d, %r9d
	callq __hipRegisterFunction
	movl $__hip_module_dtor, %edi
	addq $40, %rsp
	.cfi_def_cfa_offset 8
	jmp atexit # TAILCALL
.Lfunc_end2:
	.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
	.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
	.cfi_startproc
# %bb.0:
# Module destructor (registered via atexit by the ctor): unregisters the fat
# binary if it was registered and clears the cached handle so the teardown
# is idempotent.
	movq __hip_gpubin_handle(%rip), %rdi
	testq %rdi, %rdi
	je .LBB3_2
# %bb.1:
	pushq %rax
	.cfi_def_cfa_offset 16
	callq __hipUnregisterFatBinary
	movq $0, __hip_gpubin_handle(%rip)
	addq $8, %rsp
	.cfi_def_cfa_offset 8
.LBB3_2:
	retq
.Lfunc_end3:
	.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
	.cfi_endproc
# -- End function
.type _Z9vecMatSumPiS_S_ii,@object # @_Z9vecMatSumPiS_S_ii
.section .rodata,"a",@progbits
.globl _Z9vecMatSumPiS_S_ii
.p2align 3, 0x0
_Z9vecMatSumPiS_S_ii:
.quad _Z24__device_stub__vecMatSumPiS_S_ii
.size _Z9vecMatSumPiS_S_ii, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "\n%ix%i blocks of %ix%i threads = %i threads total \n"
.size .L.str, 52
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "\n time for %i blocks of %i threads : %f \n"
.size .L.str.1, 42
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "average time for %ix%i matrix sum is %f "
.size .L.str.2, 41
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "\n a[%i]=%i\n"
.size .L.str.3, 12
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z9vecMatSumPiS_S_ii"
.size .L__unnamed_1, 21
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__vecMatSumPiS_S_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9vecMatSumPiS_S_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z9vecMatSumPiS_S_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e280000002100 */
/*0030*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */
/* 0x000e680000002600 */
/*0040*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */
/* 0x000e620000002200 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fc400078e0203 */
/*0060*/ IMAD R3, R2, c[0x0][0x4], R5 ; /* 0x0000010002037a24 */
/* 0x002fc800078e0205 */
/*0070*/ IMAD R0, R0, c[0x0][0x178], R3 ; /* 0x00005e0000007a24 */
/* 0x000fca00078e0203 */
/*0080*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x17c], PT ; /* 0x00005f0000007a0c */
/* 0x000fda0003f06270 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*00b0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*00c0*/ IMAD.WIDE R4, R0, R7, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fc800078e0207 */
/*00d0*/ IMAD.WIDE R2, R0.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x0c0fe400078e0207 */
/*00e0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00f0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*0100*/ IMAD.WIDE R6, R0, R7, c[0x0][0x170] ; /* 0x00005c0000067625 */
/* 0x000fe200078e0207 */
/*0110*/ IADD3 R9, R4, R3, RZ ; /* 0x0000000304097210 */
/* 0x004fca0007ffe0ff */
/*0120*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*0130*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0140*/ BRA 0x140; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9vecMatSumPiS_S_ii
.globl _Z9vecMatSumPiS_S_ii
.p2align 8
.type _Z9vecMatSumPiS_S_ii,@function
_Z9vecMatSumPiS_S_ii:
; gfx1100 device code for vecMatSum(int* a, int* b, int* c, int width, int length).
; Kernarg layout (see metadata): a@0x0, b@0x8, c@0x10, width@0x18, length@0x1c.
; v0 arrives with packed workitem ids: bits [9:0] = tid.x, [19:10] = tid.y.
	s_clause 0x1
	s_load_b32 s4, s[0:1], 0x2c               ; packed workgroup sizes (x|y<<16)
	s_load_b64 s[2:3], s[0:1], 0x18           ; s2 = width, s3 = length
	v_and_b32_e32 v1, 0x3ff, v0               ; v1 = threadIdx.x
	v_bfe_u32 v0, v0, 10, 10                  ; v0 = threadIdx.y
	s_waitcnt lgkmcnt(0)
	s_and_b32 s5, s4, 0xffff                  ; s5 = blockDim.x
	s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
	v_mad_u64_u32 v[2:3], null, s14, s5, v[1:2] ; row = blockIdx.x*blockDim.x + tid.x
	s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
	v_mul_lo_u32 v1, v2, s2                   ; row * width
	s_lshr_b32 s2, s4, 16                     ; blockDim.y
	s_mul_i32 s15, s15, s2                    ; blockIdx.y * blockDim.y
	s_mov_b32 s2, exec_lo
	s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
	v_add3_u32 v0, s15, v0, v1                ; tid = row*width + col
	v_cmpx_gt_i32_e64 s3, v0                  ; mask off lanes with tid >= length
	s_cbranch_execz .LBB0_2
; In-bounds lanes: compute byte addresses and do c[tid] = a[tid] + b[tid].
	s_load_b128 s[4:7], s[0:1], 0x0           ; a and b base pointers
	v_ashrrev_i32_e32 v1, 31, v0              ; sign-extend tid to 64 bits
	s_load_b64 s[0:1], s[0:1], 0x10           ; c base pointer
	s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
	v_lshlrev_b64 v[0:1], 2, v[0:1]           ; tid * sizeof(int)
	s_waitcnt lgkmcnt(0)
	v_add_co_u32 v2, vcc_lo, s4, v0
	s_delay_alu instid0(VALU_DEP_2)
	v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
	v_add_co_u32 v4, vcc_lo, s6, v0
	v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
	v_add_co_u32 v0, vcc_lo, s0, v0
	global_load_b32 v2, v[2:3], off           ; a[tid]
	global_load_b32 v3, v[4:5], off           ; b[tid]
	v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
	s_waitcnt vmcnt(0)
	v_add_nc_u32_e32 v2, v3, v2
	global_store_b32 v[0:1], v2, off          ; c[tid] = sum
.LBB0_2:
	s_nop 0
	s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
	s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9vecMatSumPiS_S_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9vecMatSumPiS_S_ii, .Lfunc_end0-_Z9vecMatSumPiS_S_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9vecMatSumPiS_S_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9vecMatSumPiS_S_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000bf282_00000000-6_CUDA_P6_MatrixSum.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
	.cfi_startproc
# atexit hook: unregisters the CUDA fatbin handle cached by the module
# registration constructor (_ZL24__sti____cudaRegisterAllv).
	endbr64
	subq $8, %rsp
	.cfi_def_cfa_offset 16
	movq _ZL20__cudaFatCubinHandle(%rip), %rdi
	call __cudaUnregisterFatBinary@PLT
	addq $8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2060:
	.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z34__device_stub__Z9vecMatSumPiS_S_iiPiS_S_ii
.type _Z34__device_stub__Z9vecMatSumPiS_S_iiPiS_S_ii, @function
_Z34__device_stub__Z9vecMatSumPiS_S_iiPiS_S_ii:
.LFB2082:
	.cfi_startproc
# CUDA host stub generated by nvcc for vecMatSum(int*, int*, int*, int, int).
# Spills the arguments, builds the void* argument array, pops the launch
# configuration pushed by the <<<...>>> expansion, and — if one was pending —
# calls cudaLaunchKernel. Protected by a stack canary (%fs:40).
	endbr64
	subq $152, %rsp
	.cfi_def_cfa_offset 160
# Spill SysV arguments: three pointers + two ints.
	movq %rdi, 24(%rsp)
	movq %rsi, 16(%rsp)
	movq %rdx, 8(%rsp)
	movl %ecx, 4(%rsp)
	movl %r8d, (%rsp)
# Install the stack-protector canary.
	movq %fs:40, %rax
	movq %rax, 136(%rsp)
	xorl %eax, %eax
# Argument pointer array at 96(%rsp), one slot per kernel parameter.
	leaq 24(%rsp), %rax
	movq %rax, 96(%rsp)
	leaq 16(%rsp), %rax
	movq %rax, 104(%rsp)
	leaq 8(%rsp), %rax
	movq %rax, 112(%rsp)
	leaq 4(%rsp), %rax
	movq %rax, 120(%rsp)
	movq %rsp, %rax
	movq %rax, 128(%rsp)
# Default-initialize grid/block dim3 slots to (1,1,1) before popping.
	movl $1, 48(%rsp)
	movl $1, 52(%rsp)
	movl $1, 56(%rsp)
	movl $1, 60(%rsp)
	movl $1, 64(%rsp)
	movl $1, 68(%rsp)
	leaq 40(%rsp), %rcx
	leaq 32(%rsp), %rdx
	leaq 60(%rsp), %rsi
	leaq 48(%rsp), %rdi
	call __cudaPopCallConfiguration@PLT
	testl %eax, %eax
	je .L7
.L3:
# Canary check and return (also the join point after the launch).
	movq 136(%rsp), %rax
	subq %fs:40, %rax
	jne .L8
	addq $152, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 8
	ret
.L7:
	.cfi_restore_state
# Launch path: shared-mem size and stream go on the stack, grid/block dims
# in registers, argument array in %r9.
	pushq 40(%rsp)
	.cfi_def_cfa_offset 168
	pushq 40(%rsp)
	.cfi_def_cfa_offset 176
	leaq 112(%rsp), %r9
	movq 76(%rsp), %rcx
	movl 84(%rsp), %r8d
	movq 64(%rsp), %rsi
	movl 72(%rsp), %edx
	leaq _Z9vecMatSumPiS_S_ii(%rip), %rdi
	call cudaLaunchKernel@PLT
	addq $16, %rsp
	.cfi_def_cfa_offset 160
	jmp .L3
.L8:
	call __stack_chk_fail@PLT
	.cfi_endproc
.LFE2082:
	.size _Z34__device_stub__Z9vecMatSumPiS_S_iiPiS_S_ii, .-_Z34__device_stub__Z9vecMatSumPiS_S_iiPiS_S_ii
.globl _Z9vecMatSumPiS_S_ii
.type _Z9vecMatSumPiS_S_ii, @function
_Z9vecMatSumPiS_S_ii:
.LFB2083:
	.cfi_startproc
# Host-side trampoline bearing the kernel's mangled name; the <<<...>>>
# call site targets this symbol, which simply forwards to the device stub.
	endbr64
	subq $8, %rsp
	.cfi_def_cfa_offset 16
	call _Z34__device_stub__Z9vecMatSumPiS_S_iiPiS_S_ii
	addq $8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2083:
	.size _Z9vecMatSumPiS_S_ii, .-_Z9vecMatSumPiS_S_ii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "\n%ix%i blocks of %ix%i threads = %i threads total \n"
.align 8
.LC3:
.string "\n time for %i blocks of %i threads : %f \n"
.align 8
.LC5:
.string "average time for %ix%i matrix sum is %f "
.section .rodata.str1.1,"aMS",@progbits,1
.LC6:
.string "\n a[%i]=%i\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $136, %rsp
.cfi_def_cfa_offset 192
movq %rsi, %rbx
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 56(%rsp), %rdi
call cudaEventCreate@PLT
leaq 64(%rsp), %rdi
call cudaEventCreate@PLT
movq 16(%rbx), %rdi
testq %rdi, %rdi
je .L12
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %r14
movl %eax, 20(%rsp)
imull %eax, %r14d
movq 8(%rbx), %rdi
movl $16, %r15d
testq %rdi, %rdi
je .L13
.L27:
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, %r15d
.L13:
movl %r15d, %r13d
movl $1, 104(%rsp)
pxor %xmm0, %xmm0
cvtsi2ssl %r14d, %xmm0
movl %r15d, %eax
imull %r15d, %eax
pxor %xmm1, %xmm1
cvtsi2ssl %eax, %xmm1
divss %xmm1, %xmm0
pxor %xmm1, %xmm1
ucomiss %xmm0, %xmm1
ja .L38
.L26:
sqrtss %xmm0, %xmm0
.L16:
cvttss2sil %xmm0, %ebp
movl $1, 116(%rsp)
movl %ebp, %eax
imull %r13d, %eax
imull %eax, %eax
movl %r14d, %r9d
cmpl %r14d, %eax
jnb .L17
movl %r13d, %edx
imull %r13d, %edx
leal 1(%rbp), %eax
movl %edx, %esi
imull %ebp, %esi
movl %eax, %ecx
imull %esi, %ecx
movl %ebp, %r8d
imull %ebp, %r8d
imull %edx, %r8d
movl $0, %edx
.L18:
movl %eax, %r12d
addl $1, %eax
movl %ecx, %r10d
addl %esi, %ecx
addl %esi, %edx
leal (%rdx,%r8), %edi
cmpl %r9d, %edi
jb .L18
movslq %r14d, %rbx
subq $8, %rsp
.cfi_def_cfa_offset 200
pushq %r10
.cfi_def_cfa_offset 208
movl %r13d, %r9d
movl %r13d, %r8d
movl %r12d, %ecx
movl %ebp, %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leal 0(,%r14,4), %eax
cltq
addq $16, %rsp
.cfi_def_cfa_offset 192
leaq 72(%rsp), %rdi
movq %rax, 8(%rsp)
movq %rax, %rsi
call cudaMalloc@PLT
leaq 80(%rsp), %rdi
movq 8(%rsp), %rsi
call cudaMalloc@PLT
leaq 88(%rsp), %rdi
movq 8(%rsp), %rsi
call cudaMalloc@PLT
movq 8(%rsp), %rdi
call malloc@PLT
movq %rax, 24(%rsp)
movq 8(%rsp), %rdi
call malloc@PLT
movq %rax, 32(%rsp)
movq 8(%rsp), %rdi
call malloc@PLT
movq %rax, 40(%rsp)
.L29:
movl $0, %eax
movq 24(%rsp), %rcx
movq 32(%rsp), %rsi
.L21:
movl %eax, (%rcx,%rax,4)
leal (%rax,%rax), %edx
movl %edx, (%rsi,%rax,4)
addq $1, %rax
cmpq %rbx, %rax
jne .L21
.L20:
movl $1, %ecx
movq 8(%rsp), %rbx
movq %rbx, %rdx
movq 24(%rsp), %rsi
movq 72(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %rbx, %rdx
movq 32(%rsp), %rsi
movq 80(%rsp), %rdi
call cudaMemcpy@PLT
movl $10, %ebx
movl $0x00000000, 16(%rsp)
jmp .L23
.L38:
call sqrtf@PLT
jmp .L16
.L17:
movslq %r14d, %rbx
subq $8, %rsp
.cfi_def_cfa_offset 200
pushq %rax
.cfi_def_cfa_offset 208
movl %r13d, %r9d
movl %r13d, %r8d
movl %ebp, %ecx
movl %ebp, %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leal 0(,%r14,4), %eax
movslq %eax, %r12
movq %r12, 24(%rsp)
addq $16, %rsp
.cfi_def_cfa_offset 192
leaq 72(%rsp), %rdi
movq %r12, %rsi
call cudaMalloc@PLT
leaq 80(%rsp), %rdi
movq %r12, %rsi
call cudaMalloc@PLT
leaq 88(%rsp), %rdi
movq %r12, %rsi
call cudaMalloc@PLT
movq %r12, %rdi
call malloc@PLT
movq %rax, 24(%rsp)
movq %r12, %rdi
call malloc@PLT
movq %rax, 32(%rsp)
movq %r12, %rdi
call malloc@PLT
movq %rax, 40(%rsp)
movl %ebp, %r12d
testl %r14d, %r14d
jle .L20
jmp .L29
.L22:
movl $0, %esi
movq 64(%rsp), %rdi
call cudaEventRecord@PLT
movq 64(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 52(%rsp), %rdi
movq 64(%rsp), %rdx
movq 56(%rsp), %rsi
call cudaEventElapsedTime@PLT
pxor %xmm0, %xmm0
cvtss2sd 52(%rsp), %xmm0
movl %r15d, %ecx
movl %ebp, %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movss 16(%rsp), %xmm2
addss 52(%rsp), %xmm2
movss %xmm2, 16(%rsp)
subl $1, %ebx
je .L43
.L23:
movl $0, %esi
movq 56(%rsp), %rdi
call cudaEventRecord@PLT
movl %ebp, 108(%rsp)
movl %r12d, 112(%rsp)
movl %r13d, 96(%rsp)
movl %r13d, 100(%rsp)
movl 104(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 96(%rsp), %rdx
movq 108(%rsp), %rdi
movl 116(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L22
movl %r14d, %r8d
movl 20(%rsp), %ecx
movq 88(%rsp), %rdx
movq 80(%rsp), %rsi
movq 72(%rsp), %rdi
call _Z34__device_stub__Z9vecMatSumPiS_S_iiPiS_S_ii
jmp .L22
.L43:
movaps %xmm2, %xmm0
divss .LC4(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
movl 20(%rsp), %edx
movl %edx, %ecx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl $2, %ecx
movq 8(%rsp), %rdx
movq 88(%rsp), %rsi
movq 40(%rsp), %rbp
movq %rbp, %rdi
call cudaMemcpy@PLT
testl %r14d, %r14d
jle .L24
movslq %r14d, %r15
imulq $1717986919, %r15, %r15
sarq $33, %r15
movl %r14d, %eax
sarl $31, %eax
subl %eax, %r15d
movslq %r15d, %r12
salq $2, %r12
movl $0, %ebx
leaq .LC6(%rip), %r13
.L25:
movl 0(%rbp), %ecx
movl %ebx, %edx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl %r15d, %ebx
addq %r12, %rbp
cmpl %r14d, %ebx
jl .L25
.L24:
movq 24(%rsp), %rdi
call free@PLT
movq 32(%rsp), %rdi
call free@PLT
movq 40(%rsp), %rdi
call free@PLT
movq 72(%rsp), %rdi
call cudaFree@PLT
movq 80(%rsp), %rdi
call cudaFree@PLT
movq 88(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rdi
call cudaEventDestroy@PLT
movq 64(%rsp), %rdi
call cudaEventDestroy@PLT
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L44
movl $0, %eax
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L45:
.cfi_restore_state
movl $1, 104(%rsp)
movl $90000, %r14d
movl $16, %r15d
movl $300, 20(%rsp)
movss .LC0(%rip), %xmm0
movl $16, %r13d
jmp .L26
.L12:
movq 8(%rbx), %rdi
testq %rdi, %rdi
je .L45
movl $90000, %r14d
movl $300, 20(%rsp)
jmp .L27
.L44:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC7:
.string "_Z9vecMatSumPiS_S_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
	.cfi_startproc
# Static-init constructor (runs via .init_array): registers the embedded
# fat binary, registers the vecMatSum kernel under its mangled name, ends
# registration, and queues the unregister hook with atexit.
	endbr64
	subq $8, %rsp
	.cfi_def_cfa_offset 16
	leaq _ZL15__fatDeviceText(%rip), %rdi
	call __cudaRegisterFatBinary@PLT
	movq %rax, %rdi
	movq %rax, _ZL20__cudaFatCubinHandle(%rip)
# Four NULL stack arguments: thread-limit / tid / bid / bDim slots unused.
	pushq $0
	.cfi_def_cfa_offset 24
	pushq $0
	.cfi_def_cfa_offset 32
	pushq $0
	.cfi_def_cfa_offset 40
	pushq $0
	.cfi_def_cfa_offset 48
	movl $0, %r9d
	movl $-1, %r8d
	leaq .LC7(%rip), %rdx
	movq %rdx, %rcx
	leaq _Z9vecMatSumPiS_S_ii(%rip), %rsi
	call __cudaRegisterFunction@PLT
	addq $32, %rsp
	.cfi_def_cfa_offset 16
	movq _ZL20__cudaFatCubinHandle(%rip), %rdi
	call __cudaRegisterFatBinaryEnd@PLT
	leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
	call atexit@PLT
	addq $8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2085:
	.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1135593472
.align 4
.LC4:
.long 1092616192
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "CUDA_P6_MatrixSum.hip"
.globl _Z24__device_stub__vecMatSumPiS_S_ii # -- Begin function _Z24__device_stub__vecMatSumPiS_S_ii
.p2align 4, 0x90
.type _Z24__device_stub__vecMatSumPiS_S_ii,@function
_Z24__device_stub__vecMatSumPiS_S_ii: # @_Z24__device_stub__vecMatSumPiS_S_ii
	.cfi_startproc
# %bb.0:
# HIP host stub (clang): spills the five kernel arguments, builds the void*
# argument array, pops the pushed launch configuration, and forwards to
# hipLaunchKernel keyed by the kernel handle symbol.
	subq $120, %rsp
	.cfi_def_cfa_offset 128
# Spill incoming SysV arguments (three pointers, two ints).
	movq %rdi, 72(%rsp)
	movq %rsi, 64(%rsp)
	movq %rdx, 56(%rsp)
	movl %ecx, 4(%rsp)
	movl %r8d, (%rsp)
# Kernel-argument pointer array at 80(%rsp).
	leaq 72(%rsp), %rax
	movq %rax, 80(%rsp)
	leaq 64(%rsp), %rax
	movq %rax, 88(%rsp)
	leaq 56(%rsp), %rax
	movq %rax, 96(%rsp)
	leaq 4(%rsp), %rax
	movq %rax, 104(%rsp)
	movq %rsp, %rax
	movq %rax, 112(%rsp)
# Pop grid dim, block dim, shared-mem size, and stream.
	leaq 40(%rsp), %rdi
	leaq 24(%rsp), %rsi
	leaq 16(%rsp), %rdx
	leaq 8(%rsp), %rcx
	callq __hipPopCallConfiguration
	movq 40(%rsp), %rsi
	movl 48(%rsp), %edx
	movq 24(%rsp), %rcx
	movl 32(%rsp), %r8d
	leaq 80(%rsp), %r9
	movl $_Z9vecMatSumPiS_S_ii, %edi
# Shared-mem size and stream passed on the stack.
	pushq 8(%rsp)
	.cfi_adjust_cfa_offset 8
	pushq 24(%rsp)
	.cfi_adjust_cfa_offset 8
	callq hipLaunchKernel
	addq $136, %rsp
	.cfi_adjust_cfa_offset -136
	retq
.Lfunc_end0:
	.size _Z24__device_stub__vecMatSumPiS_S_ii, .Lfunc_end0-_Z24__device_stub__vecMatSumPiS_S_ii
	.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI1_0:
.long 0x41200000 # float 10
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $232, %rsp
.cfi_def_cfa_offset 288
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %rbx
leaq 72(%rsp), %rdi
callq hipEventCreate
leaq 24(%rsp), %rdi
callq hipEventCreate
movq 16(%rbx), %rdi
testq %rdi, %rdi
je .LBB1_1
# %bb.2:
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
jmp .LBB1_3
.LBB1_1:
movl $300, %eax # imm = 0x12C
.LBB1_3:
movq %rax, 32(%rsp) # 8-byte Spill
movl %eax, %r8d
imull %r8d, %r8d
movq 8(%rbx), %rdi
testq %rdi, %rdi
movq %r8, (%rsp) # 8-byte Spill
je .LBB1_4
# %bb.5:
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq (%rsp), %r8 # 8-byte Reload
movq %rax, %r14
jmp .LBB1_6
.LBB1_4:
movl $16, %r14d
.LBB1_6:
cvtsi2ss %r8d, %xmm0
movabsq $4294967297, %rbp # imm = 0x100000001
movl %r14d, %r15d
movl %r14d, %ebx
imull %r14d, %ebx
cvtsi2ss %ebx, %xmm1
divss %xmm1, %xmm0
cvtss2sd %xmm0, %xmm0
xorps %xmm1, %xmm1
ucomisd %xmm1, %xmm0
jb .LBB1_8
# %bb.7:
sqrtsd %xmm0, %xmm0
jmp .LBB1_9
.LBB1_8: # %call.sqrt
callq sqrt
movq (%rsp), %r8 # 8-byte Reload
.LBB1_9: # %.split
cvttsd2si %xmm0, %r12d
movq %r12, %r13
imulq %rbp, %r13
movl %r12d, %eax
imull %r13d, %eax
movl %ebx, %ecx
imull %eax, %ecx
cmpl %r8d, %ecx
jae .LBB1_10
# %bb.11: # %.lr.ph.preheader
movabsq $-4294967296, %rcx # imm = 0xFFFFFFFF00000000
.p2align 4, 0x90
.LBB1_12: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl %r13d, %esi
andq %rcx, %r13
movq %rbp, %rax
addq %r13, %rax
leaq -1(,%r13), %rdx
addq %rbp, %rdx
movl %esi, %edi
leaq (%rdi,%rax), %r13
decq %r13
shrq $32, %rdx
movl %edx, %eax
imull %esi, %eax
movl %ebx, %edi
imull %eax, %edi
cmpl %r8d, %edi
jb .LBB1_12
jmp .LBB1_13
.LBB1_10:
movl %r13d, %esi
movl %r12d, %edx
.LBB1_13: # %._crit_edge
movq %r15, %rcx
shlq $32, %rcx
movq %rcx, 40(%rsp) # 8-byte Spill
leal (,%r8,4), %ecx
movl %ecx, 16(%rsp) # 4-byte Spill
imull %eax, %ebx
xorl %ebp, %ebp
movl $.L.str, %edi
# kill: def $edx killed $edx killed $rdx
movl %r14d, %ecx
movq %r14, 96(%rsp) # 8-byte Spill
movl %r14d, %r8d
movl %ebx, %r9d
xorl %eax, %eax
callq printf
movslq 16(%rsp), %r14 # 4-byte Folded Reload
leaq 64(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
leaq 56(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
leaq 48(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
movq %r14, %rdi
callq malloc
movq %rax, %rbx
movq %r14, %rdi
callq malloc
movq %rax, 16(%rsp) # 8-byte Spill
movq %r14, 88(%rsp) # 8-byte Spill
movq %r14, %rdi
callq malloc
movq 16(%rsp), %rdx # 8-byte Reload
movq %rax, 104(%rsp) # 8-byte Spill
cmpl $0, 32(%rsp) # 4-byte Folded Reload
je .LBB1_16
# %bb.14: # %.lr.ph90.preheader
movq (%rsp), %rax # 8-byte Reload
cmpl $1, %eax
# kill: def $eax killed $eax killed $rax def $rax
adcl $0, %eax
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB1_15: # %.lr.ph90
# =>This Inner Loop Header: Depth=1
movl %ecx, (%rbx,%rcx,4)
movl %ebp, (%rdx,%rcx,4)
incq %rcx
addl $2, %ebp
cmpq %rcx, %rax
jne .LBB1_15
.LBB1_16: # %._crit_edge91
addq 40(%rsp), %r15 # 8-byte Folded Reload
movq %rdx, %rbp
movq 64(%rsp), %rdi
movq %rbx, 112(%rsp) # 8-byte Spill
movq %rbx, %rsi
movq 88(%rsp), %rbx # 8-byte Reload
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
movq 56(%rsp), %rdi
movq %rbp, %rsi
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
xorps %xmm0, %xmm0
movl $10, %ebp
leaq 12(%rsp), %rbx
movq 96(%rsp), %r14 # 8-byte Reload
jmp .LBB1_17
.p2align 4, 0x90
.LBB1_19: # in Loop: Header=BB1_17 Depth=1
movq 24(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 24(%rsp), %rdi
callq hipEventSynchronize
movq 72(%rsp), %rsi
movq 24(%rsp), %rdx
movq %rbx, %rdi
callq hipEventElapsedTime
movss 12(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.1, %edi
movl %r12d, %esi
movl %r14d, %edx
movb $1, %al
callq printf
movss 40(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
addss 12(%rsp), %xmm0
decl %ebp
je .LBB1_20
.LBB1_17: # =>This Inner Loop Header: Depth=1
movss %xmm0, 40(%rsp) # 4-byte Spill
movq 72(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq %r13, %rdi
movl $1, %esi
movq %r15, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_19
# %bb.18: # in Loop: Header=BB1_17 Depth=1
movq 64(%rsp), %rax
movq 56(%rsp), %rcx
movq 48(%rsp), %rdx
movq %rax, 184(%rsp)
movq %rcx, 176(%rsp)
movq %rdx, 168(%rsp)
movq 32(%rsp), %rax # 8-byte Reload
movl %eax, 84(%rsp)
movq (%rsp), %rax # 8-byte Reload
movl %eax, 80(%rsp)
leaq 184(%rsp), %rax
movq %rax, 192(%rsp)
leaq 176(%rsp), %rax
movq %rax, 200(%rsp)
leaq 168(%rsp), %rax
movq %rax, 208(%rsp)
leaq 84(%rsp), %rax
movq %rax, 216(%rsp)
leaq 80(%rsp), %rax
movq %rax, 224(%rsp)
leaq 152(%rsp), %rdi
leaq 136(%rsp), %rsi
leaq 128(%rsp), %rdx
leaq 120(%rsp), %rcx
callq __hipPopCallConfiguration
movq 152(%rsp), %rsi
movl 160(%rsp), %edx
movq 136(%rsp), %rcx
movl 144(%rsp), %r8d
movl $_Z9vecMatSumPiS_S_ii, %edi
leaq 192(%rsp), %r9
pushq 120(%rsp)
.cfi_adjust_cfa_offset 8
pushq 136(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB1_19
.LBB1_20:
divss .LCPI1_0(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
movl $.L.str.2, %edi
movq 32(%rsp), %rbx # 8-byte Reload
movl %ebx, %esi
movl %ebx, %edx
movb $1, %al
callq printf
movq 48(%rsp), %rsi
movq 104(%rsp), %r12 # 8-byte Reload
movq %r12, %rdi
movq 88(%rsp), %rdx # 8-byte Reload
movl $2, %ecx
callq hipMemcpy
testl %ebx, %ebx
je .LBB1_23
# %bb.21: # %.lr.ph96
movl (%rsp), %r14d # 4-byte Reload
movl $3435973837, %r15d # imm = 0xCCCCCCCD
imulq %r14, %r15
shrq $34, %r15
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB1_22: # =>This Inner Loop Header: Depth=1
movl (%r12,%rbx,4), %edx
movl $.L.str.3, %edi
movl %ebx, %esi
xorl %eax, %eax
callq printf
addq %r15, %rbx
cmpq %r14, %rbx
jb .LBB1_22
.LBB1_23: # %._crit_edge97
movq 112(%rsp), %rdi # 8-byte Reload
callq free
movq 16(%rsp), %rdi # 8-byte Reload
callq free
movq %r12, %rdi
callq free
movq 64(%rsp), %rdi
callq hipFree
movq 56(%rsp), %rdi
callq hipFree
movq 48(%rsp), %rdi
callq hipFree
movq 72(%rsp), %rdi
callq hipEventDestroy
movq 24(%rsp), %rdi
callq hipEventDestroy
xorl %eax, %eax
addq $232, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9vecMatSumPiS_S_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9vecMatSumPiS_S_ii,@object # @_Z9vecMatSumPiS_S_ii
.section .rodata,"a",@progbits
.globl _Z9vecMatSumPiS_S_ii
.p2align 3, 0x0
_Z9vecMatSumPiS_S_ii:
.quad _Z24__device_stub__vecMatSumPiS_S_ii
.size _Z9vecMatSumPiS_S_ii, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "\n%ix%i blocks of %ix%i threads = %i threads total \n"
.size .L.str, 52
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "\n time for %i blocks of %i threads : %f \n"
.size .L.str.1, 42
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "average time for %ix%i matrix sum is %f "
.size .L.str.2, 41
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "\n a[%i]=%i\n"
.size .L.str.3, 12
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z9vecMatSumPiS_S_ii"
.size .L__unnamed_1, 21
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__vecMatSumPiS_S_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9vecMatSumPiS_S_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <cstdio>
#include <cmath>
#include <algorithm>
#include <climits>
#include <cuda_runtime.h>
#include "CudaGillespie_cuda.cuh"
/*
Atomic-max function. You may find it useful for normalization.
We haven't really talked about this yet, but __device__ functions not
only are run on the GPU, but are called from within a kernel.
Source:
http://stackoverflow.com/questions/17399119/
cant-we-use-atomic-operations-for-floating-point-variables-in-cuda
*/
__device__ static float atomicMin(float* address, float val)
{
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fminf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
/*
* Gillespie kernel. Each call to the kernel will advance each simulation
* by one step.
*
* We use the random numbers in rand_reactions to decide which transition
* occurs, and the random numbers in rand_times to decide on a dt.
*
* The times in simulation_times get updated with the calculated dt,
* and the concentrations/states may or may not get updated depending on the
* transition.
*/
__global__
void
cudaGillespieTimestepKernel(const float *rand_reactions,
const float *rand_times,
float *simulation_times,
float *simulation_concentrations,
State *simulation_states,
const unsigned int b,
const unsigned int g,
const float k_on,
const float k_off,
const unsigned int num_simulations) {
// Get current thread's index.
unsigned int thread_index = blockIdx.x * blockDim.x + threadIdx.x;
// Go through every simulation.
while (thread_index < num_simulations) {
float rand_reaction = rand_reactions[thread_index];
float curr_conc = simulation_concentrations[thread_index];
State curr_state = simulation_states[thread_index];
float lambda = 0;
if (curr_state == OFF) {
// lambda = sum of rate parameters.
lambda = k_on + (curr_conc * g);
float cutoff = k_on / lambda;
if (rand_reaction < cutoff) {
// Flip to on
simulation_states[thread_index] = ON;
} else {
// Decay
simulation_concentrations[thread_index]--;
}
} else {
// lambda = sum of rate parameters.
lambda = k_off + b + (curr_conc * g);
float cutoff1 = k_off / lambda;
float cutoff2 = cutoff1 + (b / lambda);
if (rand_reaction < cutoff1) {
// Flip to off
simulation_states[thread_index] = OFF;
} else if (rand_reaction < cutoff2) {
// Grow
simulation_concentrations[thread_index]++;
} else {
// Decay
simulation_concentrations[thread_index]--;
}
}
// Update time by calculated lambda.
float rand_time = rand_times[thread_index];
simulation_times[thread_index] += -log(rand_time) / lambda;
// Update thread_index.
thread_index += blockDim.x * gridDim.x;
}
}
/*
* Helper function to call Gillespie kernel.
*/
void cudaCallGillespieTimestepKernel(const unsigned int blocks,
const unsigned int threads_per_block,
const float *rand_reactions,
const float *rand_times,
float *simulation_times,
float *simulation_concentrations,
State *simulation_states,
const unsigned int b,
const unsigned int g,
const float k_on,
const float k_off,
const unsigned int num_simulations) {
cudaGillespieTimestepKernel<<<blocks, threads_per_block>>>(rand_reactions,
rand_times, simulation_times, simulation_concentrations,
simulation_states, b, g, k_on, k_off, num_simulations);
}
/*
* Resampling kernel. After each iteration of the Gillespie algorithm, update
* the values in an array of uniformly spaced samples. We use 1000 points
* "evenly" spaced from 0 to 100.
*
* For each simulation, check its current time. If the index corresponding
* to this time exceeds the last filled index, fill up to the current index.
* Then, update the last filled index.
*/
__global__
void
cudaResamplingKernel(float *simulation_samples,
int *last_sample_indices,
const float *simulation_times,
const float *simulation_concentrations,
const unsigned int num_simulations) {
// Get current thread's index.
unsigned int thread_index = blockIdx.x * blockDim.x + threadIdx.x;
// Go through every simulation.
while (thread_index < num_simulations) {
float *curr_sample = simulation_samples + (thread_index * SAMPLE_SIZE);
float curr_time = simulation_times[thread_index];
int last_sample_index = last_sample_indices[thread_index];
int curr_index = curr_time / ((float) SAMPLE_TIME / SAMPLE_SIZE);
// If the index corresponding to the current simulation time is
// beyond the last sample index, populate the array up to the
// curr_index.
if (curr_index > last_sample_index
&& last_sample_index < SAMPLE_SIZE) {
float curr_conc = simulation_concentrations[thread_index];
while (last_sample_index <= curr_index
&& last_sample_index < SAMPLE_SIZE) {
curr_sample[last_sample_index++] = curr_conc;
}
}
// Update last_sample_indices in GPU memory.
last_sample_indices[thread_index] = last_sample_index;
// Update thread_index.
thread_index += blockDim.x * gridDim.x;
}
}
/*
* Helper function to call Gillespie kernel.
*/
void cudaCallResamplingKernel(const unsigned int blocks,
const unsigned int threads_per_block,
float *simulation_samples,
int *last_sample_indices,
const float *simulation_times,
const float *simulation_concentrations,
const unsigned int num_simulations) {
cudaResamplingKernel<<<blocks, threads_per_block>>>(simulation_samples,
last_sample_indices, simulation_times, simulation_concentrations,
num_simulations);
}
/*
* Minimum kernel. Used to find the minimum in an array of floats. Mainly
* copied from the "maximum kernel" from lab 3.
*/
__global__
void
cudaMinimumKernel(const float *simulation_times,
float *min_val,
const unsigned int num_simulations) {
extern __shared__ float partial_outputs[];
// Get current thread's index.
unsigned int thread_index = blockIdx.x * blockDim.x + threadIdx.x;
float thread_min = INT_MAX;
while (thread_index < num_simulations) {
// Find the maximum MAGNITUDE (take abs value) for this thread.
thread_min = min(thread_min, simulation_times[thread_index]);
thread_index += blockDim.x * gridDim.x;
}
partial_outputs[threadIdx.x] = thread_min;
// Make sure all threads in block finish before continuing.
__syncthreads();
// Use the first thread in the block to calculate the block's
// max.
if (threadIdx.x == 0) {
float block_min = INT_MAX;
for (uint thread_idx = 0; thread_idx < blockDim.x; ++thread_idx) {
block_min = min(block_min, partial_outputs[thread_idx]);
}
// Now we take the max with the output.
atomicMin(min_val, block_min);
}
}
/*
* Helper function to call minimum kernel.
*/
void cudaCallMinimumKernel(const unsigned int blocks,
const unsigned int threads_per_block,
const float *simulation_times,
float *min_val,
const unsigned int num_simulations) {
cudaMinimumKernel<<<blocks, threads_per_block, threads_per_block * sizeof(float)>>>(
simulation_times, min_val, num_simulations);
}
/*
* Mean kernel. For each timepoint, we want to get the mean value for all the
* simulations. This means we must sum the values of all the simulations at
* that timepoint, then divide by the total number of simulations.
*/
__global__
void
cudaMeanKernel(float *simulation_samples,
float *sample_means,
const unsigned int sample_index,
const unsigned int num_simulations) {
extern __shared__ float sdata[];
// Get current thread's index.
unsigned int thread_index = blockIdx.x * blockDim.x + threadIdx.x;
sdata[threadIdx.x] = 0;
// Go through every simulation.
while (thread_index < num_simulations) {
float *curr_sample = simulation_samples + (thread_index * SAMPLE_SIZE);
float sample_conc = curr_sample[sample_index];
sdata[threadIdx.x] += sample_conc;
// Update thread_index.
thread_index += blockDim.x * gridDim.x;
}
__syncthreads();
// Use the first thread in the block to calculate the block's sum
if (threadIdx.x == 0) {
float block_sum = 0;
for (uint thread_idx = 0; thread_idx < blockDim.x; ++thread_idx) {
block_sum += sdata[thread_idx];
}
block_sum /= (float) num_simulations;
atomicAdd(sample_means + sample_index, block_sum);
}
}
/*
* Helper function to call mean kernel.
*/
void cudaCallMeanKernel(const unsigned int blocks,
const unsigned int threads_per_block,
float *simulation_samples,
float *sample_means,
const unsigned int sample_index,
const unsigned int num_simulations) {
cudaMeanKernel<<<blocks, threads_per_block, threads_per_block * sizeof(float)>>>(
simulation_samples, sample_means, sample_index,
num_simulations);
}
/*
* Variance kernel. For each timepoint, we want to get the variance for all the
* simulations. This means we must take sum the squared differences from the mean
* at that timepoint, then divide by the total number of simulations. We rely
* on the fact that sample_means has been populated at sample_index
* (i.e. sample_means[sample_index] holds the correct average) for this
* kernel to work.
*/
__global__
void
cudaVarianceKernel(float *simulation_samples,
float *sample_vars,
float *sample_means,
const unsigned int sample_index,
const unsigned int num_simulations) {
extern __shared__ float sdata[];
// Get current thread's index.
unsigned int thread_index = blockIdx.x * blockDim.x + threadIdx.x;
float average = sample_means[sample_index];
sdata[threadIdx.x] = 0;
// Go through every simulation.
while (thread_index < num_simulations) {
float *curr_sample = simulation_samples + (thread_index * SAMPLE_SIZE);
float sample_conc = curr_sample[sample_index];
float sq_diff = powf(average - sample_conc, 2);
sdata[threadIdx.x] += sq_diff;
// Update thread_index.
thread_index += blockDim.x * gridDim.x;
}
__syncthreads();
// Use the first thread in the block to calculate the block's sum
if (threadIdx.x == 0) {
float block_sum = 0;
for (uint thread_idx = 0; thread_idx < blockDim.x; ++thread_idx) {
block_sum += sdata[thread_idx];
}
block_sum /= (float) num_simulations;
atomicAdd(sample_vars + sample_index, block_sum);
}
}
/*
* Helper function to call variance kernel.
*/
void cudaCallVarianceKernel(const unsigned int blocks,
const unsigned int threads_per_block,
float *simulation_samples,
float *sample_vars,
float *sample_means,
const unsigned int sample_index,
const unsigned int num_simulations) {
cudaVarianceKernel<<<blocks, threads_per_block, threads_per_block * sizeof(float)>>>(
simulation_samples, sample_vars, sample_means,
sample_index, num_simulations);
} | .file "tmpxft_000013fd_00000000-6_CudaGillespie_cuda.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2345:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2345:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z68__device_stub__Z27cudaGillespieTimestepKernelPKfS0_PfS1_P5StatejjffjPKfS0_PfS1_P5Statejjffj
.type _Z68__device_stub__Z27cudaGillespieTimestepKernelPKfS0_PfS1_P5StatejjffjPKfS0_PfS1_P5Statejjffj, @function
_Z68__device_stub__Z27cudaGillespieTimestepKernelPKfS0_PfS1_P5StatejjffjPKfS0_PfS1_P5Statejjffj:
.LFB2367:
.cfi_startproc
endbr64
subq $232, %rsp
.cfi_def_cfa_offset 240
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
movq %rdx, 40(%rsp)
movq %rcx, 32(%rsp)
movq %r8, 24(%rsp)
movl %r9d, 20(%rsp)
movss %xmm0, 16(%rsp)
movss %xmm1, 12(%rsp)
movq %fs:40, %rax
movq %rax, 216(%rsp)
xorl %eax, %eax
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 40(%rsp), %rax
movq %rax, 144(%rsp)
leaq 32(%rsp), %rax
movq %rax, 152(%rsp)
leaq 24(%rsp), %rax
movq %rax, 160(%rsp)
leaq 20(%rsp), %rax
movq %rax, 168(%rsp)
leaq 240(%rsp), %rax
movq %rax, 176(%rsp)
leaq 16(%rsp), %rax
movq %rax, 184(%rsp)
leaq 12(%rsp), %rax
movq %rax, 192(%rsp)
leaq 248(%rsp), %rax
movq %rax, 200(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
leaq 72(%rsp), %rcx
leaq 64(%rsp), %rdx
leaq 92(%rsp), %rsi
leaq 80(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 216(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $232, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 72(%rsp)
.cfi_def_cfa_offset 248
pushq 72(%rsp)
.cfi_def_cfa_offset 256
leaq 144(%rsp), %r9
movq 108(%rsp), %rcx
movl 116(%rsp), %r8d
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
leaq _Z27cudaGillespieTimestepKernelPKfS0_PfS1_P5Statejjffj(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 240
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2367:
.size _Z68__device_stub__Z27cudaGillespieTimestepKernelPKfS0_PfS1_P5StatejjffjPKfS0_PfS1_P5Statejjffj, .-_Z68__device_stub__Z27cudaGillespieTimestepKernelPKfS0_PfS1_P5StatejjffjPKfS0_PfS1_P5Statejjffj
.globl _Z27cudaGillespieTimestepKernelPKfS0_PfS1_P5Statejjffj
.type _Z27cudaGillespieTimestepKernelPKfS0_PfS1_P5Statejjffj, @function
_Z27cudaGillespieTimestepKernelPKfS0_PfS1_P5Statejjffj:
.LFB2368:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z68__device_stub__Z27cudaGillespieTimestepKernelPKfS0_PfS1_P5StatejjffjPKfS0_PfS1_P5Statejjffj
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2368:
.size _Z27cudaGillespieTimestepKernelPKfS0_PfS1_P5Statejjffj, .-_Z27cudaGillespieTimestepKernelPKfS0_PfS1_P5Statejjffj
.globl _Z31cudaCallGillespieTimestepKerneljjPKfS0_PfS1_P5Statejjffj
.type _Z31cudaCallGillespieTimestepKerneljjPKfS0_PfS1_P5Statejjffj, @function
_Z31cudaCallGillespieTimestepKerneljjPKfS0_PfS1_P5Statejjffj:
.LFB2338:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $56, %rsp
.cfi_def_cfa_offset 96
movq %rdx, %rbx
movq %rcx, %rbp
movq %r8, %r12
movq %r9, %r13
movss %xmm0, 8(%rsp)
movss %xmm1, 12(%rsp)
movl %esi, 36(%rsp)
movl $1, 40(%rsp)
movl %edi, 24(%rsp)
movl $1, 28(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L14
.L11:
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L14:
.cfi_restore_state
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 104
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 112
movss 28(%rsp), %xmm1
movss 24(%rsp), %xmm0
movl 120(%rsp), %r9d
movq 112(%rsp), %r8
movq %r13, %rcx
movq %r12, %rdx
movq %rbp, %rsi
movq %rbx, %rdi
call _Z68__device_stub__Z27cudaGillespieTimestepKernelPKfS0_PfS1_P5StatejjffjPKfS0_PfS1_P5Statejjffj
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L11
.cfi_endproc
.LFE2338:
.size _Z31cudaCallGillespieTimestepKerneljjPKfS0_PfS1_P5Statejjffj, .-_Z31cudaCallGillespieTimestepKerneljjPKfS0_PfS1_P5Statejjffj
.globl _Z49__device_stub__Z20cudaResamplingKernelPfPiPKfS2_jPfPiPKfS2_j
.type _Z49__device_stub__Z20cudaResamplingKernelPfPiPKfS2_jPfPiPKfS2_j, @function
_Z49__device_stub__Z20cudaResamplingKernelPfPiPKfS2_jPfPiPKfS2_j:
.LFB2369:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movl %r8d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L19
.L15:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L20
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 184
pushq 56(%rsp)
.cfi_def_cfa_offset 192
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z20cudaResamplingKernelPfPiPKfS2_j(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L15
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2369:
.size _Z49__device_stub__Z20cudaResamplingKernelPfPiPKfS2_jPfPiPKfS2_j, .-_Z49__device_stub__Z20cudaResamplingKernelPfPiPKfS2_jPfPiPKfS2_j
.globl _Z20cudaResamplingKernelPfPiPKfS2_j
.type _Z20cudaResamplingKernelPfPiPKfS2_j, @function
_Z20cudaResamplingKernelPfPiPKfS2_j:
.LFB2370:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z49__device_stub__Z20cudaResamplingKernelPfPiPKfS2_jPfPiPKfS2_j
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2370:
.size _Z20cudaResamplingKernelPfPiPKfS2_j, .-_Z20cudaResamplingKernelPfPiPKfS2_j
.globl _Z24cudaCallResamplingKerneljjPfPiPKfS2_j
.type _Z24cudaCallResamplingKerneljjPfPiPKfS2_j, @function
_Z24cudaCallResamplingKerneljjPfPiPKfS2_j:
.LFB2339:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $40, %rsp
.cfi_def_cfa_offset 80
movq %rdx, %rbx
movq %rcx, %rbp
movq %r8, %r12
movq %r9, %r13
movl %esi, 20(%rsp)
movl $1, 24(%rsp)
movl %edi, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L26
.L23:
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L26:
.cfi_restore_state
movl 80(%rsp), %r8d
movq %r13, %rcx
movq %r12, %rdx
movq %rbp, %rsi
movq %rbx, %rdi
call _Z49__device_stub__Z20cudaResamplingKernelPfPiPKfS2_jPfPiPKfS2_j
jmp .L23
.cfi_endproc
.LFE2339:
.size _Z24cudaCallResamplingKerneljjPfPiPKfS2_j, .-_Z24cudaCallResamplingKerneljjPfPiPKfS2_j
.globl _Z41__device_stub__Z17cudaMinimumKernelPKfPfjPKfPfj
.type _Z41__device_stub__Z17cudaMinimumKernelPKfPfjPKfPfj, @function
_Z41__device_stub__Z17cudaMinimumKernelPKfPfjPKfPfj:
.LFB2371:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L31
.L27:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L32
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z17cudaMinimumKernelPKfPfj(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L27
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2371:
.size _Z41__device_stub__Z17cudaMinimumKernelPKfPfjPKfPfj, .-_Z41__device_stub__Z17cudaMinimumKernelPKfPfjPKfPfj
.globl _Z17cudaMinimumKernelPKfPfj
.type _Z17cudaMinimumKernelPKfPfj, @function
_Z17cudaMinimumKernelPKfPfj:
.LFB2372:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z41__device_stub__Z17cudaMinimumKernelPKfPfjPKfPfj
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2372:
.size _Z17cudaMinimumKernelPKfPfj, .-_Z17cudaMinimumKernelPKfPfj
.globl _Z21cudaCallMinimumKerneljjPKfPfj
.type _Z21cudaCallMinimumKerneljjPKfPfj, @function
_Z21cudaCallMinimumKerneljjPKfPfj:
.LFB2340:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $32, %rsp
.cfi_def_cfa_offset 64
movq %rdx, %rbx
movq %rcx, %rbp
movl %r8d, %r12d
movl %esi, 20(%rsp)
movl $1, 24(%rsp)
movl %edi, 8(%rsp)
movl $1, 12(%rsp)
movl %esi, %esi
movl $0, %r9d
leaq 0(,%rsi,4), %r8
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L38
.L35:
addq $32, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L38:
.cfi_restore_state
movl %r12d, %edx
movq %rbp, %rsi
movq %rbx, %rdi
call _Z41__device_stub__Z17cudaMinimumKernelPKfPfjPKfPfj
jmp .L35
.cfi_endproc
.LFE2340:
.size _Z21cudaCallMinimumKerneljjPKfPfj, .-_Z21cudaCallMinimumKerneljjPKfPfj
.globl _Z38__device_stub__Z14cudaMeanKernelPfS_jjPfS_jj
.type _Z38__device_stub__Z14cudaMeanKernelPfS_jjPfS_jj, @function
_Z38__device_stub__Z14cudaMeanKernelPfS_jjPfS_jj:
.LFB2373:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L43
.L39:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L44
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L43:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z14cudaMeanKernelPfS_jj(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L39
.L44:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2373:
.size _Z38__device_stub__Z14cudaMeanKernelPfS_jjPfS_jj, .-_Z38__device_stub__Z14cudaMeanKernelPfS_jjPfS_jj
.globl _Z14cudaMeanKernelPfS_jj
.type _Z14cudaMeanKernelPfS_jj, @function
_Z14cudaMeanKernelPfS_jj:
.LFB2374:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z38__device_stub__Z14cudaMeanKernelPfS_jjPfS_jj
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2374:
.size _Z14cudaMeanKernelPfS_jj, .-_Z14cudaMeanKernelPfS_jj
.globl _Z18cudaCallMeanKerneljjPfS_jj
.type _Z18cudaCallMeanKerneljjPfS_jj, @function
_Z18cudaCallMeanKerneljjPfS_jj:
.LFB2341:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $40, %rsp
.cfi_def_cfa_offset 80
movq %rdx, %rbx
movq %rcx, %rbp
movl %r8d, %r12d
movl %r9d, %r13d
movl %esi, 20(%rsp)
movl $1, 24(%rsp)
movl %edi, 8(%rsp)
movl $1, 12(%rsp)
movl %esi, %esi
movl $0, %r9d
leaq 0(,%rsi,4), %r8
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L50
.L47:
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L50:
.cfi_restore_state
movl %r13d, %ecx
movl %r12d, %edx
movq %rbp, %rsi
movq %rbx, %rdi
call _Z38__device_stub__Z14cudaMeanKernelPfS_jjPfS_jj
jmp .L47
.cfi_endproc
.LFE2341:
.size _Z18cudaCallMeanKerneljjPfS_jj, .-_Z18cudaCallMeanKerneljjPfS_jj
.globl _Z44__device_stub__Z18cudaVarianceKernelPfS_S_jjPfS_S_jj
.type _Z44__device_stub__Z18cudaVarianceKernelPfS_S_jjPfS_S_jj, @function
_Z44__device_stub__Z18cudaVarianceKernelPfS_S_jjPfS_S_jj:
.LFB2375:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L55
.L51:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L56
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L55:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z18cudaVarianceKernelPfS_S_jj(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L51
.L56:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2375:
.size _Z44__device_stub__Z18cudaVarianceKernelPfS_S_jjPfS_S_jj, .-_Z44__device_stub__Z18cudaVarianceKernelPfS_S_jjPfS_S_jj
.globl _Z18cudaVarianceKernelPfS_S_jj
.type _Z18cudaVarianceKernelPfS_S_jj, @function
_Z18cudaVarianceKernelPfS_S_jj:
.LFB2376:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z44__device_stub__Z18cudaVarianceKernelPfS_S_jjPfS_S_jj
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2376:
.size _Z18cudaVarianceKernelPfS_S_jj, .-_Z18cudaVarianceKernelPfS_S_jj
.globl _Z22cudaCallVarianceKerneljjPfS_S_jj
.type _Z22cudaCallVarianceKerneljjPfS_S_jj, @function
_Z22cudaCallVarianceKerneljjPfS_S_jj:
.LFB2342:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $40, %rsp
.cfi_def_cfa_offset 80
movq %rdx, %rbx
movq %rcx, %rbp
movq %r8, %r12
movl %r9d, %r13d
movl %esi, 20(%rsp)
movl $1, 24(%rsp)
movl %edi, 8(%rsp)
movl $1, 12(%rsp)
movl %esi, %esi
movl $0, %r9d
leaq 0(,%rsi,4), %r8
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L62
.L59:
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L62:
.cfi_restore_state
movl 80(%rsp), %r8d
movl %r13d, %ecx
movq %r12, %rdx
movq %rbp, %rsi
movq %rbx, %rdi
call _Z44__device_stub__Z18cudaVarianceKernelPfS_S_jjPfS_S_jj
jmp .L59
.cfi_endproc
.LFE2342:
.size _Z22cudaCallVarianceKerneljjPfS_S_jj, .-_Z22cudaCallVarianceKerneljjPfS_S_jj
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z18cudaVarianceKernelPfS_S_jj"
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "_Z14cudaMeanKernelPfS_jj"
.LC2:
.string "_Z17cudaMinimumKernelPKfPfj"
.section .rodata.str1.8
.align 8
.LC3:
.string "_Z20cudaResamplingKernelPfPiPKfS2_j"
.align 8
.LC4:
.string "_Z27cudaGillespieTimestepKernelPKfS0_PfS1_P5Statejjffj"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2378:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z18cudaVarianceKernelPfS_S_jj(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z14cudaMeanKernelPfS_jj(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z17cudaMinimumKernelPKfPfj(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z20cudaResamplingKernelPfPiPKfS2_j(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z27cudaGillespieTimestepKernelPKfS0_PfS1_P5Statejjffj(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2378:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <cstdio>
#include <algorithm>
#include <cfloat>
#include <climits>
#include <cmath>
#include <cuda_runtime.h>

#include "CudaGillespie_cuda.cuh"
/*
Atomic-max function. You may find it useful for normalization.
We haven't really talked about this yet, but __device__ functions not
only are run on the GPU, but are called from within a kernel.
Source:
http://stackoverflow.com/questions/17399119/
cant-we-use-atomic-operations-for-floating-point-variables-in-cuda
*/
__device__ static float atomicMin(float* address, float val)
{
    // Atomically performs *address = fminf(*address, val) for floats by
    // reinterpreting the 32-bit pattern as int and spinning on atomicCAS.
    // Returns the value held at *address just before this thread's
    // successful update (same convention as the built-in integer atomics).
    int* address_as_i = (int*) address;
    int old = *address_as_i, assumed;
    do {
        assumed = old;
        // Try to install min(val, current). If another thread changed the
        // word in between, atomicCAS returns the newer value and we retry
        // against it.
        old = ::atomicCAS(address_as_i, assumed,
            __float_as_int(::fminf(val, __int_as_float(assumed))));
    } while (assumed != old);
    // NOTE(review): fminf discards a NaN operand, so a NaN val is never
    // stored -- presumably simulation times are always finite; confirm.
    return __int_as_float(old);
}
/*
* Gillespie kernel. Each call to the kernel will advance each simulation
* by one step.
*
* We use the random numbers in rand_reactions to decide which transition
* occurs, and the random numbers in rand_times to decide on a dt.
*
* The times in simulation_times get updated with the calculated dt,
* and the concentrations/states may or may not get updated depending on the
* transition.
*/
__global__
void
cudaGillespieTimestepKernel(const float *rand_reactions,
                            const float *rand_times,
                            float *simulation_times,
                            float *simulation_concentrations,
                            State *simulation_states,
                            const unsigned int b,
                            const unsigned int g,
                            const float k_on,
                            const float k_off,
                            const unsigned int num_simulations) {
    // Grid-stride loop: one Gillespie step per simulation, any launch size.
    // rand_reactions[i] selects the transition, rand_times[i] draws the dt.
    // NOTE(review): assumes both random streams are uniform in (0, 1] --
    // rand_time == 0 would give log(0) = -inf; confirm against the RNG setup.
    unsigned int thread_index = blockIdx.x * blockDim.x + threadIdx.x;
    while (thread_index < num_simulations) {
        float rand_reaction = rand_reactions[thread_index];
        float curr_conc = simulation_concentrations[thread_index];
        State curr_state = simulation_states[thread_index];
        // lambda accumulates the total rate of all enabled transitions.
        float lambda = 0.0f;
        if (curr_state == OFF) {
            // OFF state: either switch on (rate k_on) or decay (rate conc*g).
            lambda = k_on + (curr_conc * g);
            float cutoff = k_on / lambda;
            if (rand_reaction < cutoff) {
                // Flip to on.
                simulation_states[thread_index] = ON;
            } else {
                // Decay.
                // NOTE(review): nothing clamps the concentration at zero, so
                // repeated decays can drive it negative -- verify intended.
                simulation_concentrations[thread_index]--;
            }
        } else {
            // ON state: switch off (k_off), grow (b), or decay (conc*g).
            lambda = k_off + b + (curr_conc * g);
            float cutoff1 = k_off / lambda;
            float cutoff2 = cutoff1 + (b / lambda);
            if (rand_reaction < cutoff1) {
                // Flip to off.
                simulation_states[thread_index] = OFF;
            } else if (rand_reaction < cutoff2) {
                // Grow.
                simulation_concentrations[thread_index]++;
            } else {
                // Decay.
                simulation_concentrations[thread_index]--;
            }
        }
        // Advance time with dt ~ Exp(lambda) via inverse CDF: -ln(u)/lambda.
        // logf (not the double-precision log the original used) keeps this
        // kernel entirely in single precision.
        float rand_time = rand_times[thread_index];
        simulation_times[thread_index] += -logf(rand_time) / lambda;
        // Stride to this thread's next simulation.
        thread_index += blockDim.x * gridDim.x;
    }
}
/*
* Helper function to call Gillespie kernel.
*/
/*
 * Host-side wrapper: launches one Gillespie timestep over `blocks` blocks of
 * `threads_per_block` threads on the default stream. No dynamic shared
 * memory is required by the kernel.
 */
void cudaCallGillespieTimestepKernel(const unsigned int blocks,
                                     const unsigned int threads_per_block,
                                     const float *rand_reactions,
                                     const float *rand_times,
                                     float *simulation_times,
                                     float *simulation_concentrations,
                                     State *simulation_states,
                                     const unsigned int b,
                                     const unsigned int g,
                                     const float k_on,
                                     const float k_off,
                                     const unsigned int num_simulations) {
    const dim3 grid_dim(blocks);
    const dim3 block_dim(threads_per_block);
    cudaGillespieTimestepKernel<<<grid_dim, block_dim>>>(
        rand_reactions, rand_times,
        simulation_times, simulation_concentrations, simulation_states,
        b, g, k_on, k_off, num_simulations);
}
/*
* Resampling kernel. After each iteration of the Gillespie algorithm, update
* the values in an array of uniformly spaced samples. We use 1000 points
* "evenly" spaced from 0 to 100.
*
* For each simulation, check its current time. If the index corresponding
* to this time exceeds the last filled index, fill up to the current index.
* Then, update the last filled index.
*/
__global__
void
cudaResamplingKernel(float *simulation_samples,
                     int *last_sample_indices,
                     const float *simulation_times,
                     const float *simulation_concentrations,
                     const unsigned int num_simulations) {
    // Time covered by one slot of the uniformly spaced sample array.
    const float slot_width = (float) SAMPLE_TIME / SAMPLE_SIZE;
    const unsigned int stride = blockDim.x * gridDim.x;
    // Grid-stride loop over simulations.
    for (unsigned int sim = blockIdx.x * blockDim.x + threadIdx.x;
         sim < num_simulations;
         sim += stride) {
        // This simulation's private row of SAMPLE_SIZE sample slots.
        float *row = simulation_samples + (sim * SAMPLE_SIZE);
        int filled = last_sample_indices[sim];
        // Slot index corresponding to the simulation's current time
        // (truncated toward zero).
        int target = simulation_times[sim] / slot_width;
        // If the simulation has advanced past the last filled slot,
        // back-fill every slot up to (and including) `target` with the
        // current concentration, clamped to the array bounds.
        if (target > filled && filled < SAMPLE_SIZE) {
            const float conc = simulation_concentrations[sim];
            for (; filled <= target && filled < SAMPLE_SIZE; ++filled) {
                row[filled] = conc;
            }
        }
        // Persist the new high-water mark for the next resampling pass.
        last_sample_indices[sim] = filled;
    }
}
/*
* Helper function to call Gillespie kernel.
*/
/*
 * Host-side wrapper: launches the resampling kernel on the default stream.
 * No dynamic shared memory is required.
 */
void cudaCallResamplingKernel(const unsigned int blocks,
                              const unsigned int threads_per_block,
                              float *simulation_samples,
                              int *last_sample_indices,
                              const float *simulation_times,
                              const float *simulation_concentrations,
                              const unsigned int num_simulations) {
    const dim3 grid_dim(blocks);
    const dim3 block_dim(threads_per_block);
    cudaResamplingKernel<<<grid_dim, block_dim>>>(
        simulation_samples, last_sample_indices,
        simulation_times, simulation_concentrations,
        num_simulations);
}
/*
* Minimum kernel. Used to find the minimum in an array of floats. Mainly
* copied from the "maximum kernel" from lab 3.
*/
__global__
void
cudaMinimumKernel(const float *simulation_times,
                  float *min_val,
                  const unsigned int num_simulations) {
    // Dynamic shared memory: one float per thread (caller passes
    // threads_per_block * sizeof(float) as the launch's smem size).
    extern __shared__ float partial_outputs[];
    // Get current thread's index.
    unsigned int thread_index = blockIdx.x * blockDim.x + threadIdx.x;
    // Seed with FLT_MAX. The original seeded with INT_MAX, which silently
    // capped the result for any time >= 2^31 and mixed int/float in min().
    float thread_min = FLT_MAX;
    while (thread_index < num_simulations) {
        // Find the minimum time seen by this thread (grid-stride loop).
        thread_min = fminf(thread_min, simulation_times[thread_index]);
        thread_index += blockDim.x * gridDim.x;
    }
    partial_outputs[threadIdx.x] = thread_min;
    // Make sure all threads in block finish before continuing.
    __syncthreads();
    // Use the first thread in the block to fold the per-thread minima into
    // a single block minimum (serial scan; simple, not a tree reduction).
    if (threadIdx.x == 0) {
        float block_min = FLT_MAX;
        for (uint thread_idx = 0; thread_idx < blockDim.x; ++thread_idx) {
            block_min = fminf(block_min, partial_outputs[thread_idx]);
        }
        // Merge into the global result via the CAS-based float atomicMin.
        // NOTE(review): assumes the caller pre-initialized *min_val to a
        // large sentinel -- confirm at the call site.
        atomicMin(min_val, block_min);
    }
}
/*
* Helper function to call minimum kernel.
*/
/*
 * Host-side wrapper: launches the minimum kernel with one float of dynamic
 * shared memory per thread for the per-block reduction scratchpad.
 */
void cudaCallMinimumKernel(const unsigned int blocks,
                           const unsigned int threads_per_block,
                           const float *simulation_times,
                           float *min_val,
                           const unsigned int num_simulations) {
    const size_t smem_bytes = threads_per_block * sizeof(float);
    cudaMinimumKernel<<<blocks, threads_per_block, smem_bytes>>>(
        simulation_times, min_val, num_simulations);
}
/*
* Mean kernel. For each timepoint, we want to get the mean value for all the
* simulations. This means we must sum the values of all the simulations at
* that timepoint, then divide by the total number of simulations.
*/
__global__
void
cudaMeanKernel(float *simulation_samples,
               float *sample_means,
               const unsigned int sample_index,
               const unsigned int num_simulations) {
    // Dynamic shared memory: one float per thread (sized by the launcher).
    extern __shared__ float sdata[];
    const unsigned int stride = blockDim.x * gridDim.x;
    // Each thread sums its slice of simulations in a register, visiting
    // them in the same order as before, then publishes one value to shared
    // memory for the block-level fold.
    float local_sum = 0;
    for (unsigned int sim = blockIdx.x * blockDim.x + threadIdx.x;
         sim < num_simulations;
         sim += stride) {
        // Concentration of simulation `sim` at the requested timepoint.
        local_sum += simulation_samples[sim * SAMPLE_SIZE + sample_index];
    }
    sdata[threadIdx.x] = local_sum;
    __syncthreads();
    // Thread 0 folds the per-thread partials into a block sum, converts it
    // to a per-simulation contribution, and merges it into the global mean.
    if (threadIdx.x == 0) {
        float block_sum = 0;
        for (uint t = 0; t < blockDim.x; ++t) {
            block_sum += sdata[t];
        }
        block_sum /= (float) num_simulations;
        atomicAdd(sample_means + sample_index, block_sum);
    }
}
/*
* Helper function to call mean kernel.
*/
/*
 * Host-side wrapper: launches the mean kernel with one float of dynamic
 * shared memory per thread for the per-block partial sums.
 */
void cudaCallMeanKernel(const unsigned int blocks,
                        const unsigned int threads_per_block,
                        float *simulation_samples,
                        float *sample_means,
                        const unsigned int sample_index,
                        const unsigned int num_simulations) {
    const size_t smem_bytes = threads_per_block * sizeof(float);
    cudaMeanKernel<<<blocks, threads_per_block, smem_bytes>>>(
        simulation_samples, sample_means, sample_index,
        num_simulations);
}
/*
* Variance kernel. For each timepoint, we want to get the variance for all the
* simulations. This means we must take sum the squared differences from the mean
* at that timepoint, then divide by the total number of simulations. We rely
* on the fact that sample_means has been populated at sample_index
* (i.e. sample_means[sample_index] holds the correct average) for this
* kernel to work.
*/
__global__
void
cudaVarianceKernel(float *simulation_samples,
                   float *sample_vars,
                   float *sample_means,
                   const unsigned int sample_index,
                   const unsigned int num_simulations) {
    // Dynamic shared memory: one float per thread (sized by the launcher).
    extern __shared__ float sdata[];
    // Get current thread's index.
    unsigned int thread_index = blockIdx.x * blockDim.x + threadIdx.x;
    // Precondition: sample_means[sample_index] already holds the mean for
    // this timepoint (populated by cudaMeanKernel).
    float average = sample_means[sample_index];
    sdata[threadIdx.x] = 0;
    // Grid-stride loop: accumulate squared deviations for this thread's
    // share of simulations.
    while (thread_index < num_simulations) {
        float *curr_sample = simulation_samples + (thread_index * SAMPLE_SIZE);
        float sample_conc = curr_sample[sample_index];
        // Square with a plain multiply; the original's powf(x, 2) is a
        // needless transcendental call for an integer power of two.
        float diff = average - sample_conc;
        sdata[threadIdx.x] += diff * diff;
        // Update thread_index.
        thread_index += blockDim.x * gridDim.x;
    }
    __syncthreads();
    // Thread 0 folds the per-thread partials, normalizes by the population
    // size, and merges the block's contribution into the global variance.
    if (threadIdx.x == 0) {
        float block_sum = 0;
        for (uint thread_idx = 0; thread_idx < blockDim.x; ++thread_idx) {
            block_sum += sdata[thread_idx];
        }
        block_sum /= (float) num_simulations;
        atomicAdd(sample_vars + sample_index, block_sum);
    }
}
/*
* Helper function to call variance kernel.
*/
void cudaCallVarianceKernel(const unsigned int blocks,
const unsigned int threads_per_block,
float *simulation_samples,
float *sample_vars,
float *sample_means,
const unsigned int sample_index,
const unsigned int num_simulations) {
cudaVarianceKernel<<<blocks, threads_per_block, threads_per_block * sizeof(float)>>>(
simulation_samples, sample_vars, sample_means,
sample_index, num_simulations);
} | #ifndef CUDA_GILLESPIE_CUDA_CUH
#define CUDA_GILLESPIE_CUDA_CUH
#define SAMPLE_TIME 100
#define SAMPLE_SIZE 1000
enum State { OFF, ON };
void cudaCallGillespieTimestepKernel(const unsigned int blocks,
const unsigned int threads_per_block,
const float *rand_reactions,
const float *rand_times,
float *simulation_times,
float *simulation_concentrations,
State *simulation_states,
const unsigned int b,
const unsigned int g,
const float k_on,
const float k_off,
const unsigned int num_simulations);
void cudaCallResamplingKernel(const unsigned int blocks,
const unsigned int threads_per_block,
float *simulation_samples,
int *last_sample_indices,
const float *simulation_times,
const float *simulation_concentrations,
const unsigned int num_simulations);
void cudaCallMinimumKernel(const unsigned int blocks,
const unsigned int threads_per_block,
const float *simulation_times,
float *min_val,
const unsigned int num_simulations);
void cudaCallMeanKernel(const unsigned int blocks,
const unsigned int threads_per_block,
float *simulation_samples,
float *sample_means,
const unsigned int sample_index,
const unsigned int num_simulations);
void cudaCallVarianceKernel(const unsigned int blocks,
const unsigned int threads_per_block,
float *simulation_samples,
float *sample_vars,
float *sample_means,
const unsigned int sample_index,
const unsigned int num_simulations);
#endif // CUDA_GILLESPIE_CUDA_CUH |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #ifndef CUDA_GILLESPIE_CUDA_CUH
#define CUDA_GILLESPIE_CUDA_CUH
#define SAMPLE_TIME 100
#define SAMPLE_SIZE 1000
enum State { OFF, ON };
void cudaCallGillespieTimestepKernel(const unsigned int blocks,
const unsigned int threads_per_block,
const float *rand_reactions,
const float *rand_times,
float *simulation_times,
float *simulation_concentrations,
State *simulation_states,
const unsigned int b,
const unsigned int g,
const float k_on,
const float k_off,
const unsigned int num_simulations);
void cudaCallResamplingKernel(const unsigned int blocks,
const unsigned int threads_per_block,
float *simulation_samples,
int *last_sample_indices,
const float *simulation_times,
const float *simulation_concentrations,
const unsigned int num_simulations);
void cudaCallMinimumKernel(const unsigned int blocks,
const unsigned int threads_per_block,
const float *simulation_times,
float *min_val,
const unsigned int num_simulations);
void cudaCallMeanKernel(const unsigned int blocks,
const unsigned int threads_per_block,
float *simulation_samples,
float *sample_means,
const unsigned int sample_index,
const unsigned int num_simulations);
void cudaCallVarianceKernel(const unsigned int blocks,
const unsigned int threads_per_block,
float *simulation_samples,
float *sample_vars,
float *sample_means,
const unsigned int sample_index,
const unsigned int num_simulations);
#endif // CUDA_GILLESPIE_CUDA_CUH | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #ifndef CUDA_GILLESPIE_CUDA_CUH
#define CUDA_GILLESPIE_CUDA_CUH
#define SAMPLE_TIME 100
#define SAMPLE_SIZE 1000
enum State { OFF, ON };
void cudaCallGillespieTimestepKernel(const unsigned int blocks,
const unsigned int threads_per_block,
const float *rand_reactions,
const float *rand_times,
float *simulation_times,
float *simulation_concentrations,
State *simulation_states,
const unsigned int b,
const unsigned int g,
const float k_on,
const float k_off,
const unsigned int num_simulations);
void cudaCallResamplingKernel(const unsigned int blocks,
const unsigned int threads_per_block,
float *simulation_samples,
int *last_sample_indices,
const float *simulation_times,
const float *simulation_concentrations,
const unsigned int num_simulations);
void cudaCallMinimumKernel(const unsigned int blocks,
const unsigned int threads_per_block,
const float *simulation_times,
float *min_val,
const unsigned int num_simulations);
void cudaCallMeanKernel(const unsigned int blocks,
const unsigned int threads_per_block,
float *simulation_samples,
float *sample_means,
const unsigned int sample_index,
const unsigned int num_simulations);
void cudaCallVarianceKernel(const unsigned int blocks,
const unsigned int threads_per_block,
float *simulation_samples,
float *sample_vars,
float *sample_means,
const unsigned int sample_index,
const unsigned int num_simulations);
#endif // CUDA_GILLESPIE_CUDA_CUH | .text
.file "CudaGillespie_cuda.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000013fd_00000000-6_CudaGillespie_cuda.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2345:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2345:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z68__device_stub__Z27cudaGillespieTimestepKernelPKfS0_PfS1_P5StatejjffjPKfS0_PfS1_P5Statejjffj
.type _Z68__device_stub__Z27cudaGillespieTimestepKernelPKfS0_PfS1_P5StatejjffjPKfS0_PfS1_P5Statejjffj, @function
_Z68__device_stub__Z27cudaGillespieTimestepKernelPKfS0_PfS1_P5StatejjffjPKfS0_PfS1_P5Statejjffj:
.LFB2367:
.cfi_startproc
endbr64
subq $232, %rsp
.cfi_def_cfa_offset 240
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
movq %rdx, 40(%rsp)
movq %rcx, 32(%rsp)
movq %r8, 24(%rsp)
movl %r9d, 20(%rsp)
movss %xmm0, 16(%rsp)
movss %xmm1, 12(%rsp)
movq %fs:40, %rax
movq %rax, 216(%rsp)
xorl %eax, %eax
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 40(%rsp), %rax
movq %rax, 144(%rsp)
leaq 32(%rsp), %rax
movq %rax, 152(%rsp)
leaq 24(%rsp), %rax
movq %rax, 160(%rsp)
leaq 20(%rsp), %rax
movq %rax, 168(%rsp)
leaq 240(%rsp), %rax
movq %rax, 176(%rsp)
leaq 16(%rsp), %rax
movq %rax, 184(%rsp)
leaq 12(%rsp), %rax
movq %rax, 192(%rsp)
leaq 248(%rsp), %rax
movq %rax, 200(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
leaq 72(%rsp), %rcx
leaq 64(%rsp), %rdx
leaq 92(%rsp), %rsi
leaq 80(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 216(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $232, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 72(%rsp)
.cfi_def_cfa_offset 248
pushq 72(%rsp)
.cfi_def_cfa_offset 256
leaq 144(%rsp), %r9
movq 108(%rsp), %rcx
movl 116(%rsp), %r8d
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
leaq _Z27cudaGillespieTimestepKernelPKfS0_PfS1_P5Statejjffj(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 240
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2367:
.size _Z68__device_stub__Z27cudaGillespieTimestepKernelPKfS0_PfS1_P5StatejjffjPKfS0_PfS1_P5Statejjffj, .-_Z68__device_stub__Z27cudaGillespieTimestepKernelPKfS0_PfS1_P5StatejjffjPKfS0_PfS1_P5Statejjffj
.globl _Z27cudaGillespieTimestepKernelPKfS0_PfS1_P5Statejjffj
.type _Z27cudaGillespieTimestepKernelPKfS0_PfS1_P5Statejjffj, @function
_Z27cudaGillespieTimestepKernelPKfS0_PfS1_P5Statejjffj:
.LFB2368:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z68__device_stub__Z27cudaGillespieTimestepKernelPKfS0_PfS1_P5StatejjffjPKfS0_PfS1_P5Statejjffj
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2368:
.size _Z27cudaGillespieTimestepKernelPKfS0_PfS1_P5Statejjffj, .-_Z27cudaGillespieTimestepKernelPKfS0_PfS1_P5Statejjffj
.globl _Z31cudaCallGillespieTimestepKerneljjPKfS0_PfS1_P5Statejjffj
.type _Z31cudaCallGillespieTimestepKerneljjPKfS0_PfS1_P5Statejjffj, @function
_Z31cudaCallGillespieTimestepKerneljjPKfS0_PfS1_P5Statejjffj:
.LFB2338:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $56, %rsp
.cfi_def_cfa_offset 96
movq %rdx, %rbx
movq %rcx, %rbp
movq %r8, %r12
movq %r9, %r13
movss %xmm0, 8(%rsp)
movss %xmm1, 12(%rsp)
movl %esi, 36(%rsp)
movl $1, 40(%rsp)
movl %edi, 24(%rsp)
movl $1, 28(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L14
.L11:
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L14:
.cfi_restore_state
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 104
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 112
movss 28(%rsp), %xmm1
movss 24(%rsp), %xmm0
movl 120(%rsp), %r9d
movq 112(%rsp), %r8
movq %r13, %rcx
movq %r12, %rdx
movq %rbp, %rsi
movq %rbx, %rdi
call _Z68__device_stub__Z27cudaGillespieTimestepKernelPKfS0_PfS1_P5StatejjffjPKfS0_PfS1_P5Statejjffj
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L11
.cfi_endproc
.LFE2338:
.size _Z31cudaCallGillespieTimestepKerneljjPKfS0_PfS1_P5Statejjffj, .-_Z31cudaCallGillespieTimestepKerneljjPKfS0_PfS1_P5Statejjffj
.globl _Z49__device_stub__Z20cudaResamplingKernelPfPiPKfS2_jPfPiPKfS2_j
.type _Z49__device_stub__Z20cudaResamplingKernelPfPiPKfS2_jPfPiPKfS2_j, @function
_Z49__device_stub__Z20cudaResamplingKernelPfPiPKfS2_jPfPiPKfS2_j:
.LFB2369:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movl %r8d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L19
.L15:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L20
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 184
pushq 56(%rsp)
.cfi_def_cfa_offset 192
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z20cudaResamplingKernelPfPiPKfS2_j(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L15
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2369:
.size _Z49__device_stub__Z20cudaResamplingKernelPfPiPKfS2_jPfPiPKfS2_j, .-_Z49__device_stub__Z20cudaResamplingKernelPfPiPKfS2_jPfPiPKfS2_j
.globl _Z20cudaResamplingKernelPfPiPKfS2_j
.type _Z20cudaResamplingKernelPfPiPKfS2_j, @function
_Z20cudaResamplingKernelPfPiPKfS2_j:
.LFB2370:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z49__device_stub__Z20cudaResamplingKernelPfPiPKfS2_jPfPiPKfS2_j
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2370:
.size _Z20cudaResamplingKernelPfPiPKfS2_j, .-_Z20cudaResamplingKernelPfPiPKfS2_j
.globl _Z24cudaCallResamplingKerneljjPfPiPKfS2_j
.type _Z24cudaCallResamplingKerneljjPfPiPKfS2_j, @function
_Z24cudaCallResamplingKerneljjPfPiPKfS2_j:
.LFB2339:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $40, %rsp
.cfi_def_cfa_offset 80
movq %rdx, %rbx
movq %rcx, %rbp
movq %r8, %r12
movq %r9, %r13
movl %esi, 20(%rsp)
movl $1, 24(%rsp)
movl %edi, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L26
.L23:
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L26:
.cfi_restore_state
movl 80(%rsp), %r8d
movq %r13, %rcx
movq %r12, %rdx
movq %rbp, %rsi
movq %rbx, %rdi
call _Z49__device_stub__Z20cudaResamplingKernelPfPiPKfS2_jPfPiPKfS2_j
jmp .L23
.cfi_endproc
.LFE2339:
.size _Z24cudaCallResamplingKerneljjPfPiPKfS2_j, .-_Z24cudaCallResamplingKerneljjPfPiPKfS2_j
.globl _Z41__device_stub__Z17cudaMinimumKernelPKfPfjPKfPfj
.type _Z41__device_stub__Z17cudaMinimumKernelPKfPfjPKfPfj, @function
_Z41__device_stub__Z17cudaMinimumKernelPKfPfjPKfPfj:
.LFB2371:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L31
.L27:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L32
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z17cudaMinimumKernelPKfPfj(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L27
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2371:
.size _Z41__device_stub__Z17cudaMinimumKernelPKfPfjPKfPfj, .-_Z41__device_stub__Z17cudaMinimumKernelPKfPfjPKfPfj
.globl _Z17cudaMinimumKernelPKfPfj
.type _Z17cudaMinimumKernelPKfPfj, @function
_Z17cudaMinimumKernelPKfPfj:
.LFB2372:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z41__device_stub__Z17cudaMinimumKernelPKfPfjPKfPfj
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2372:
.size _Z17cudaMinimumKernelPKfPfj, .-_Z17cudaMinimumKernelPKfPfj
.globl _Z21cudaCallMinimumKerneljjPKfPfj
.type _Z21cudaCallMinimumKerneljjPKfPfj, @function
_Z21cudaCallMinimumKerneljjPKfPfj:
.LFB2340:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $32, %rsp
.cfi_def_cfa_offset 64
movq %rdx, %rbx
movq %rcx, %rbp
movl %r8d, %r12d
movl %esi, 20(%rsp)
movl $1, 24(%rsp)
movl %edi, 8(%rsp)
movl $1, 12(%rsp)
movl %esi, %esi
movl $0, %r9d
leaq 0(,%rsi,4), %r8
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L38
.L35:
addq $32, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L38:
.cfi_restore_state
movl %r12d, %edx
movq %rbp, %rsi
movq %rbx, %rdi
call _Z41__device_stub__Z17cudaMinimumKernelPKfPfjPKfPfj
jmp .L35
.cfi_endproc
.LFE2340:
.size _Z21cudaCallMinimumKerneljjPKfPfj, .-_Z21cudaCallMinimumKerneljjPKfPfj
.globl _Z38__device_stub__Z14cudaMeanKernelPfS_jjPfS_jj
.type _Z38__device_stub__Z14cudaMeanKernelPfS_jjPfS_jj, @function
_Z38__device_stub__Z14cudaMeanKernelPfS_jjPfS_jj:
.LFB2373:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L43
.L39:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L44
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L43:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z14cudaMeanKernelPfS_jj(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L39
.L44:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2373:
.size _Z38__device_stub__Z14cudaMeanKernelPfS_jjPfS_jj, .-_Z38__device_stub__Z14cudaMeanKernelPfS_jjPfS_jj
.globl _Z14cudaMeanKernelPfS_jj
.type _Z14cudaMeanKernelPfS_jj, @function
_Z14cudaMeanKernelPfS_jj:
.LFB2374:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z38__device_stub__Z14cudaMeanKernelPfS_jjPfS_jj
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2374:
.size _Z14cudaMeanKernelPfS_jj, .-_Z14cudaMeanKernelPfS_jj
.globl _Z18cudaCallMeanKerneljjPfS_jj
.type _Z18cudaCallMeanKerneljjPfS_jj, @function
_Z18cudaCallMeanKerneljjPfS_jj:
.LFB2341:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $40, %rsp
.cfi_def_cfa_offset 80
movq %rdx, %rbx
movq %rcx, %rbp
movl %r8d, %r12d
movl %r9d, %r13d
movl %esi, 20(%rsp)
movl $1, 24(%rsp)
movl %edi, 8(%rsp)
movl $1, 12(%rsp)
movl %esi, %esi
movl $0, %r9d
leaq 0(,%rsi,4), %r8
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L50
.L47:
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L50:
.cfi_restore_state
movl %r13d, %ecx
movl %r12d, %edx
movq %rbp, %rsi
movq %rbx, %rdi
call _Z38__device_stub__Z14cudaMeanKernelPfS_jjPfS_jj
jmp .L47
.cfi_endproc
.LFE2341:
.size _Z18cudaCallMeanKerneljjPfS_jj, .-_Z18cudaCallMeanKerneljjPfS_jj
.globl _Z44__device_stub__Z18cudaVarianceKernelPfS_S_jjPfS_S_jj
.type _Z44__device_stub__Z18cudaVarianceKernelPfS_S_jjPfS_S_jj, @function
_Z44__device_stub__Z18cudaVarianceKernelPfS_S_jjPfS_S_jj:
.LFB2375:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L55
.L51:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L56
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L55:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z18cudaVarianceKernelPfS_S_jj(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L51
.L56:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2375:
.size _Z44__device_stub__Z18cudaVarianceKernelPfS_S_jjPfS_S_jj, .-_Z44__device_stub__Z18cudaVarianceKernelPfS_S_jjPfS_S_jj
.globl _Z18cudaVarianceKernelPfS_S_jj
.type _Z18cudaVarianceKernelPfS_S_jj, @function
_Z18cudaVarianceKernelPfS_S_jj:
.LFB2376:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z44__device_stub__Z18cudaVarianceKernelPfS_S_jjPfS_S_jj
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2376:
.size _Z18cudaVarianceKernelPfS_S_jj, .-_Z18cudaVarianceKernelPfS_S_jj
.globl _Z22cudaCallVarianceKerneljjPfS_S_jj
.type _Z22cudaCallVarianceKerneljjPfS_S_jj, @function
_Z22cudaCallVarianceKerneljjPfS_S_jj:
.LFB2342:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $40, %rsp
.cfi_def_cfa_offset 80
movq %rdx, %rbx
movq %rcx, %rbp
movq %r8, %r12
movl %r9d, %r13d
movl %esi, 20(%rsp)
movl $1, 24(%rsp)
movl %edi, 8(%rsp)
movl $1, 12(%rsp)
movl %esi, %esi
movl $0, %r9d
leaq 0(,%rsi,4), %r8
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L62
.L59:
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L62:
.cfi_restore_state
movl 80(%rsp), %r8d
movl %r13d, %ecx
movq %r12, %rdx
movq %rbp, %rsi
movq %rbx, %rdi
call _Z44__device_stub__Z18cudaVarianceKernelPfS_S_jjPfS_S_jj
jmp .L59
.cfi_endproc
.LFE2342:
.size _Z22cudaCallVarianceKerneljjPfS_S_jj, .-_Z22cudaCallVarianceKerneljjPfS_S_jj
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z18cudaVarianceKernelPfS_S_jj"
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "_Z14cudaMeanKernelPfS_jj"
.LC2:
.string "_Z17cudaMinimumKernelPKfPfj"
.section .rodata.str1.8
.align 8
.LC3:
.string "_Z20cudaResamplingKernelPfPiPKfS2_j"
.align 8
.LC4:
.string "_Z27cudaGillespieTimestepKernelPKfS0_PfS1_P5Statejjffj"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2378:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z18cudaVarianceKernelPfS_S_jj(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z14cudaMeanKernelPfS_jj(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z17cudaMinimumKernelPKfPfj(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z20cudaResamplingKernelPfPiPKfS2_j(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z27cudaGillespieTimestepKernelPKfS0_PfS1_P5Statejjffj(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2378:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
	.text
.file "CudaGillespie_cuda.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
#include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
#include <curand_kernel.h>
struct struct_linked_list_node;
// Singly linked node. The low bit of `next` doubles as a logical-deletion
// mark (see _ll_is_marked/_ll_marked/_ll_unmarked), Harris-style.
typedef struct struct_linked_list_node {
    int data;
    struct struct_linked_list_node *next;
} linked_list_node;
// Lock-free sorted list delimited by INT_MIN / INT_MAX sentinel nodes
// (installed by ll_reset).
typedef struct {
    linked_list_node *head, *tail;
} linked_list;
// True when the low pointer bit (the logical-deletion mark) is set.
__device__ bool _ll_is_marked(linked_list_node *n) {
    unsigned long long int bits = (unsigned long long int) n;
    return (bits & 1ULL) != 0;
}
// Return n with its low bit set: the "marked" (logically deleted) form.
__device__ linked_list_node *_ll_marked(linked_list_node *n) {
    unsigned long long int bits = (unsigned long long int) n;
    bits |= 1ULL;
    return (linked_list_node *) bits;
}
// Strip the deletion mark: return n with its low bit cleared.
__device__ linked_list_node *_ll_unmarked(linked_list_node *n) {
    unsigned long long int bits = (unsigned long long int) n;
    bits &= ~1ULL;
    return (linked_list_node *) bits;
}
// Heap-allocate a fresh, unlinked node carrying `data`.
// NOTE(review): device-side malloc can return NULL on heap exhaustion;
// the result is dereferenced unchecked, consistent with the rest of
// this file.
__device__ linked_list_node *ll_create_node(int data) {
    linked_list_node *node = (linked_list_node *) malloc(sizeof(linked_list_node));
    node->data = data;
    node->next = NULL;
    return node;
}
// First real element: the node right after the head sentinel
// (equals ll->tail when the list is empty).
__device__ linked_list_node *ll_first(linked_list *ll) {
    linked_list_node *head = ll->head;
    return head->next;
}
// Install fresh INT_MIN / INT_MAX sentinel head/tail nodes.
// Does not free any previously linked nodes -- the caller owns those.
__device__ void ll_reset(linked_list *ll) {
    ll->head = ll_create_node(INT_MIN);
    ll->tail = ll_create_node(INT_MAX);
    ll->head->next = ll->tail;
}
// Allocate an empty list and initialise its sentinels.
__device__ linked_list *ll_create() {
    linked_list *list = (linked_list *) malloc(sizeof(linked_list));
    ll_reset(list);
    return list;
}
// Debug dump: walk the entire list (sentinels included), printing each
// node's data and address. Mark bits are stripped before following
// `next`, so logically deleted nodes are still visited.
// Fix: pointers were printed with %x, which is undefined for a 64-bit
// pointer argument; use %p with a void* cast.
__device__ void ll_print(linked_list *ll) {
    linked_list_node *n = ll->head;
    while (n) {
        printf("%d @ ", n->data);
        printf("%p\n", (void *) n);
        n = _ll_unmarked(n->next);
    }
}
// Free the physical chain [from, to) that was just unlinked by a CAS.
// Mark bits are stripped before each dereference/free.
// NOTE(review): freeing here is unsafe if another thread is still
// traversing these nodes -- TODO confirm against the usage pattern.
__device__ void _ll_free_nodes(linked_list_node *from, linked_list_node *to) {
    from = _ll_unmarked(from);
    while (from && from != to) {
        linked_list_node *next = from->next;
        free(from);
        from = _ll_unmarked(next);
    }
}
// Harris-style lock-free search. Returns right_node, the first unmarked
// node with data >= `data` (possibly the tail sentinel), and stores its
// unmarked predecessor in *left_node. Any run of marked nodes between
// the two is physically unlinked with one CAS before returning.
// NOTE(review): if the node directly after head is already marked,
// left_node_next (and *left_node) are read before ever being assigned
// -- TODO confirm this cannot happen in practice.
__device__ linked_list_node *ll_search(linked_list *ll,
                                       int data,
                                       linked_list_node **left_node
                                       ) {
    linked_list_node *left_node_next, *right_node;
    while (true) {
        // Phase 1: scan from head, remembering the last unmarked node
        // seen (left) and its successor snapshot (left_node_next).
        linked_list_node *t = ll->head;
        linked_list_node *t_next = ll->head->next;
        do {
            if (!_ll_is_marked(t_next)) {
                *left_node = t;
                left_node_next = t_next;
            }
            t = _ll_unmarked(t_next);
            if (t == ll->tail)
                break;
            t_next = t->next;
        } while (_ll_is_marked(t_next) || (t->data < data));
        right_node = t;
        // Phase 2: left and right adjacent -> nothing to snip out.
        if (left_node_next == right_node)
            if ((right_node != ll->tail) && _ll_is_marked(right_node->next))
                continue; // right_node was deleted under us; rescan
            else
                return right_node;
        // Phase 3: unlink the marked run between left and right.
        unsigned long long int old = (unsigned long long int) left_node_next;
        if (old == atomicCAS((unsigned long long int *) &((*left_node)->next),
                             old,
                             (unsigned long long int) right_node
                             )
            ) {
            _ll_free_nodes((linked_list_node *) old, right_node);
            if ((right_node != ll->tail) && _ll_is_marked(right_node->next))
                continue;
            else
                return right_node;
        }
    }
}
// Insert `data` keeping the list sorted; false (no-op) on duplicates.
// Lock-free: a single CAS links the new node between left and right; on
// CAS failure the search/link is retried.
// Fix: the freshly allocated node was leaked when the key already
// existed -- free it before returning false.
__device__ bool ll_insert(linked_list *ll, int data) {
    linked_list_node *new_node = ll_create_node(data);
    linked_list_node *right_node, *left_node;
    while (true) {
        right_node = ll_search(ll, data, &left_node);
        if ((right_node != ll->tail) && (right_node->data == data)) {
            free(new_node); // duplicate: the unused node must not leak
            return false;
        }
        new_node->next = right_node;
        unsigned long long int old = (unsigned long long int) right_node;
        if (old == atomicCAS((unsigned long long int *) &(left_node->next),
                             old,
                             (unsigned long long int) new_node
                             )
            )
            return true;
    }
}
// Remove the node whose data == `data`; false when not present.
// Two phases: (1) CAS the victim's own `next` to its marked form
// (logical delete), (2) CAS it out of the predecessor (physical
// delete). If phase 2 loses the race, a ll_search call performs the
// cleanup instead.
__device__ bool ll_remove(linked_list *ll, int data) {
    linked_list_node * right_node, *right_node_next, *left_node;
    while (true) {
        right_node = ll_search(ll, data, &left_node);
        if ((right_node == ll->tail) || (right_node->data != data))
            return false; // key not in the list
        right_node_next = right_node->next;
        if (!_ll_is_marked(right_node_next)) {
            unsigned long long int old = (unsigned long long int) right_node_next;
            if (old == atomicCAS((unsigned long long int *) &(right_node->next),
                                 old,
                                 (unsigned long long int ) _ll_marked(right_node_next)
                                 )
                )
                break; // this thread owns the logical delete
        }
    }
    unsigned long long int old = (unsigned long long int) right_node;
    if (old == atomicCAS((unsigned long long int *) &(left_node->next),
                         old,
                         (unsigned long long int ) right_node_next
                         )
        )
        _ll_free_nodes((linked_list_node *) old, right_node_next);
    else
        right_node = ll_search(ll, right_node->data, &left_node);
    return true;
}
// call ONCE!
// Free every node (sentinels included) and the list itself.
// NOTE(review): follows raw `next` pointers; a marked (low-bit-set)
// pointer would be dereferenced misaligned -- ll_free_safe strips
// marks and is safer for lists that saw concurrent removals.
__device__ void ll_free(linked_list *ll) {
    // todo
    if (ll) {
        linked_list_node *lln = ll->head;
        while (lln) {
            linked_list_node *next = lln->next;
            free(lln);
            lln = next;
        }
        free(ll);
        ll = NULL; // clears only the local copy of the pointer
    }
}
// Like ll_free, but strips deletion marks before dereferencing.
__device__ void ll_free_safe(linked_list *ll) {
    if (ll) {
        linked_list_node *lln = _ll_unmarked(ll->head);
        while (lln) {
            linked_list_node *next = _ll_unmarked(lln->next);
            free(lln);
            lln = next;
        }
        free(ll);
        ll = NULL;
    }
}
// Copy up to l data values from the list into arr, in order; remaining
// slots are filled with the 13373 debug marker.
// Fix: the original advanced `node` before reading it, which skipped
// the first element and copied the INT_MAX tail sentinel into the
// output (compare ll_to_array_safe, which reads before advancing).
// The loop now also stops at the tail sentinel explicitly.
__device__ void ll_to_array(linked_list *ll, int *arr, unsigned int l) {
    linked_list_node *node = ll->head;
    if (node) // skip the sentinel head
        node = node->next;
    unsigned int i = 0;
    while (node && node != ll->tail && i < l) {
        arr[i++] = node->data;
        node = node->next;
    }
    // fill remaining cells with the debug marker, if any
    while (i < l)
        arr[i++] = 13373; // todo
}
// Copy up to l values into arr, skipping logically deleted (marked)
// nodes; remaining slots are filled with the 13373 debug marker.
// NOTE(review): the INT_MAX tail sentinel is not excluded, so it can be
// copied as the last value -- TODO confirm whether callers rely on it.
__device__ void ll_to_array_safe(linked_list *ll, int *arr, unsigned int l) {
    linked_list_node *node = ll->head;
    linked_list_node *unmarked = _ll_unmarked(node);
    if (node) { // skip the sentinel head
        node = node->next;
        unmarked = _ll_unmarked(node);
    }
    unsigned int i = 0;
    while (unmarked && i < l) {
        if (!_ll_is_marked(unmarked->next)) // live node: keep its value
            arr[i++] = unmarked->data;
        node = unmarked->next;
        unmarked = _ll_unmarked(node);
    }
    while (i < l)
        arr[i++] = 13373; // todo
}
// grid
// Uniform spatial hash over the world rectangle; cell (row, col) holds
// the lock-free list of node ids currently inside it.
typedef struct {
    float lower_x, lower_y, cell_width, cell_height; // world origin + cell extents
    unsigned int cols, rows;                         // grid dimensions
    linked_list **cells; // rows*cols cell lists (allocated with cudaMalloc)
} grid;
/*
__device__ grid *grid_create() {
grid *g = (grid *) malloc(sizeof(grid));
return g;
}
*/
// Host-side grid construction: allocates the device grid struct and its
// cells array, fills a host mirror, and copies it over. The per-cell
// lists themselves are created later on the device (grid_alloc_cells).
// Returns the device pointer.
// Fix: a pointer was printed with %x (undefined for 64-bit pointers);
// use %p with a void* cast.
grid *grid_host_create(float lower_x,
                       float lower_y,
                       float cell_width,
                       float cell_height,
                       unsigned int cols,
                       unsigned int rows
                       ) {
    grid *d_grid, h_grid;
    cudaMalloc((void **) &d_grid, sizeof(grid)); // NOTE(review): CUDA return codes unchecked
    h_grid.lower_x = lower_x;
    h_grid.lower_y = lower_y;
    h_grid.cell_width = cell_width;
    h_grid.cell_height = cell_height;
    h_grid.cols = cols;
    h_grid.rows = rows;
    linked_list **cells;
    cudaMalloc((void **) &cells, rows * cols * sizeof(linked_list *));
    h_grid.cells = cells;
    printf("h_grid.cells %p\n", (void *) cells);
    printf("h_grid.cell_height %f\n", h_grid.cell_height);
    printf("h_grid.cell_width %f\n", h_grid.cell_width);
    cudaMemcpy(d_grid,
               &h_grid,
               sizeof(grid),
               cudaMemcpyHostToDevice
               );
    return d_grid;
}
// Device-side second init phase: allocate one empty list per grid cell.
// Run by a single thread (see grid_init).
// Fix: a pointer was printed with %x (undefined for 64-bit pointers);
// use %p with a void* cast.
__device__ void grid_alloc_cells(grid *g) {
    unsigned int num_cells = g->rows * g->cols;
    printf("num_cells %d\n", num_cells);
    printf("grid.cells %p\n", (void *) g->cells);
    for (unsigned int i = 0; i < num_cells; i++) {
        g->cells[i] = ll_create();
    }
}
// Map a world position to its cell list; NULL (with a log line) when
// pos falls outside the grid.
// NOTE(review): floor() promotes to double here; floorf would keep the
// computation in float. Negative offsets convert to huge unsigned
// values, so the bounds check below still rejects them.
__device__ linked_list *_grid_cell(grid *g, float2 pos) {
    unsigned int col = floor((pos.x - g->lower_x) / g->cell_width);
    unsigned int row = floor((pos.y - g->lower_y) / g->cell_height);
    // index computed unconditionally but only used when in bounds
    unsigned int i = row * (g->cols) + col;
    if (col < g->cols && row < g->rows) {
        return g->cells[i];
    } else {
        printf("_grid_cell: NULL\n");
        return NULL;
    }
}
// Register node id n in the cell covering pos. Returns that cell's
// list, or NULL when pos is outside the grid (nothing inserted then).
__device__ linked_list *grid_add_node(grid *g, float2 pos, int n) {
    linked_list *cell = _grid_cell(g, pos);
    if (cell != NULL)
        ll_insert(cell, n);
    return cell;
}
// Remove node id n from the cell covering pos; false when pos is out of
// bounds or n was not registered there.
__device__ bool grid_remove_node(grid *g, float2 pos, int n) {
    linked_list *cell = _grid_cell(g, pos);
    return cell ? ll_remove(cell, n) : false;
}
// Return a malloc'd array of 9 cell-list pointers: the cell containing
// pos first, then the existing neighbours (row above, left, right, row
// below), NULL-padded at the end. Returns NULL when pos is outside the
// grid. Caller must free() the array.
// NOTE(review): the device malloc result is dereferenced unchecked.
__device__ linked_list **grid_neighbours(grid *g, float2 pos) {
    unsigned int col = floor((pos.x - g->lower_x) / g->cell_width);
    unsigned int row = floor((pos.y - g->lower_y) / g->cell_height);
    if (col >= g->cols || row >= g->rows) {
        printf("this cell does not exist: col %d, row %d\n", col, row);
        printf("no such cell, pos: %f, %f\n", pos.x, pos.y);
        return NULL;
    }
    linked_list **neighbours = (linked_list **) malloc(9 * sizeof(linked_list *));
    unsigned int i = 0;
    neighbours[i++] = g->cells[row * g->cols + col]; // own cell first
    // clamp the horizontal neighbour span to the grid
    unsigned int left_most = (col > 0) ? col - 1 : 0;
    unsigned int right_most = (col + 1 < g->cols) ? col + 1 : col;
    if (row > 0) { // row above
        for (unsigned int c = left_most; c <= right_most; c++) {
            neighbours[i++] = g->cells[(row - 1) * g->cols + c];
        }
    }
    if (left_most < col)
        neighbours[i++] = g->cells[row * g->cols + left_most];
    if (right_most > col)
        neighbours[i++] = g->cells[row * g->cols + right_most];
    if (row + 1 < g->rows) { // row below
        for (unsigned int c = left_most; c <= right_most; c++) {
            neighbours[i++] = g->cells[(row + 1) * g->cols + c];
        }
    }
    while (i < 9)
        neighbours[i++] = NULL;
    return neighbours;
}
// Per-node state for the random-waypoint mobility model.
typedef struct {
    float2 pos;      // current position
    float2 waypoint; // destination currently walked toward
    float vel;       // speed per simulation step
    int pause;       // remaining pause steps (0 = moving)
    float so;        // per-node offset added into the safety-distance check
} random_waypoint_node;
// Per-node state for the random-direction mobility model.
typedef struct {
    float2 pos;
    float direction; // heading, radians (drawn from [0, 2*pi))
    float vel;
    int pause;
    float so;
} random_direction_node;
// Axis-aligned world bounds.
typedef struct {
    float x_lower;
    float x_upper;
    float y_lower;
    float y_upper;
} world_aabb;
// Simulation configuration shared by both mobility models.
typedef struct {
    world_aabb world;
    float max_velocity;
    float min_so, max_so;
    unsigned int pause; // pause length assigned on arrival/clamping
} random_waypoint_config;
// Uniform float spanning [lower, upper], drawn from thread i's cuRAND
// stream (curand_uniform excludes 0.0 and includes 1.0).
__device__ float random_float_in_range(curandState *rand_state,
                                       int i,
                                       float lower,
                                       float upper
                                       ) {
    float u = curand_uniform(&rand_state[i]);
    return lower + u * (upper - lower);
}
// Uniform-ish unsigned int in [lower, upper), drawn from thread i's
// cuRAND stream.
// Fix: `% (upper - lower)` was undefined for upper == lower; an empty
// or inverted range now returns lower.
// NOTE(review): the modulo introduces a small bias when the range does
// not divide 2^32 -- acceptable for this simulation.
__device__ unsigned int random_int_in_range(curandState *rand_state,
                                            int i,
                                            unsigned int lower,
                                            unsigned int upper
                                            ) {
    if (upper <= lower)
        return lower; // avoid division by zero on an empty range
    return curand(&rand_state[i]) % (upper - lower) + lower;
}
// One-time grid setup kernel: thread 0 of the launch allocates every
// per-cell list; all other threads exit immediately (num_nodes unused).
__global__ void grid_init(grid *grid,
                          unsigned int num_nodes
                          ) {
    int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid != 0)
        return;
    printf("grid_alloc_cells\n");
    grid_alloc_cells(grid);
}
// Init kernel, one thread per node: seeds the per-thread RNG (clock64
// seed -> different runs differ), places the node uniformly in the
// world, starts it paused for one step, and registers it in the grid.
__global__ void random_waypoint_init(curandState *rand_state,
                                     random_waypoint_config* config,
                                     random_waypoint_node* nodes,
                                     grid *grid,
                                     unsigned int num_nodes
                                     ) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < num_nodes) {
        curand_init(clock64(), i, 0, &rand_state[i]);
        nodes[i].pos = make_float2(random_float_in_range(rand_state,
                                                         i,
                                                         config->world.x_lower,
                                                         config->world.x_upper
                                                         ),
                                   random_float_in_range(rand_state,
                                                         i,
                                                         config->world.y_lower,
                                                         config->world.y_upper
                                                         )
                                   );
        // waypoint == pos and pause == 1: the first step picks a target
        nodes[i].waypoint = nodes[i].pos;
        nodes[i].vel = 0.0;
        nodes[i].pause = 1;
        nodes[i].so = random_float_in_range(rand_state,
                                            i,
                                            config->min_so,
                                            config->max_so
                                            );
        grid_add_node(grid, nodes[i].pos, i);
        printf("%d pos %f, %f\n", i, nodes[i].pos.x, nodes[i].pos.y);
    }
}
// True when node i can move to pos without coming closer than the
// combined safety distance (own so + neighbour's vel + neighbour's so)
// to any other node in the 9 surrounding cells.
// NOTE(review): grid_neighbours returns NULL for out-of-bounds pos and
// the result is dereferenced unchecked -- TODO confirm pos is always in
// bounds here. Traversal follows raw `next` pointers (no mark
// stripping), unlike random_direction_is_safe.
__device__ bool random_waypoint_is_safe(random_waypoint_node *nodes,
                                        grid *grid,
                                        int i,
                                        float2 pos
                                        ) {
    linked_list **neighbours = grid_neighbours(grid, pos);
    unsigned int count = 0;
    for (unsigned int j = 0; j < 9; j++)
        if (neighbours[j]) {
            count++;
            linked_list_node *n = ll_first(neighbours[j]);
            while (n != neighbours[j]->tail) {
                if (n->data != i) { // skip the moving node itself
                    float dx = nodes[n->data].pos.x - pos.x;
                    float dy = nodes[n->data].pos.y - pos.y;
                    float dsq = dx * dx + dy * dy;
                    float safe_d = nodes[i].so + nodes[n->data].vel + nodes[n->data].so;
                    // compare squared distances to avoid a sqrt
                    if (dsq < (safe_d * safe_d)) {
                        free(neighbours);
                        return false;
                    }
                }
                n = n->next;
            }
        }
    free(neighbours);
    return true;
}
// Random-direction variant of the safety check: same distance test as
// random_waypoint_is_safe, but skips logically deleted (marked) nodes
// and strips mark bits before following `next`.
// NOTE(review): grid_neighbours can return NULL (out-of-bounds pos);
// the result is dereferenced unchecked -- TODO confirm.
__device__ bool random_direction_is_safe(random_direction_node *nodes,
                                         grid *grid,
                                         int i,
                                         float2 pos
                                         ) {
    linked_list **neighbours = grid_neighbours(grid, pos);
    unsigned int count = 0;
    for (unsigned int j = 0; j < 9; j++) {
        if (neighbours[j]) {
            count++;
            linked_list_node *n = ll_first(neighbours[j]);
            while (n && n != neighbours[j]->tail) {
                // skip any run of logically deleted nodes
                while (_ll_is_marked(n->next)) {
                    n = _ll_unmarked(n->next);
                }
                if (n) {
                    if (n->data != i) { // skip the moving node itself
                        float dx = nodes[n->data].pos.x - pos.x;
                        float dy = nodes[n->data].pos.y - pos.y;
                        float dsq = dx * dx + dy * dy;
                        float safe_d = nodes[i].so + nodes[n->data].vel + nodes[n->data].so;
                        // squared-distance comparison avoids a sqrt
                        if (dsq < (safe_d * safe_d)) {
                            free(neighbours);
                            return false;
                        }
                    }
                    n = _ll_unmarked(n->next);
                }
            }
        }
    }
    free(neighbours);
    return true;
}
// One simulation step of the random-waypoint model, one thread per
// node: paused nodes count down (picking a new waypoint/velocity when
// the pause ends); moving nodes advance toward their waypoint by vel,
// snapping to it (and pausing) when within reach, and only move when
// the candidate position passes the safety check.
__global__ void random_waypoint_step(curandState *rand_state,
                                     random_waypoint_config* config,
                                     random_waypoint_node* nodes,
                                     grid *grid,
                                     unsigned int num_nodes
                                     ) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < num_nodes) {
        if (nodes[i].pause > 0) {
            if (--nodes[i].pause <= 0) {
                // pause over: draw a fresh waypoint and speed
                nodes[i].waypoint =
                    make_float2(random_float_in_range(rand_state,
                                                      i,
                                                      config->world.x_lower,
                                                      config->world.x_upper
                                                      ),
                                random_float_in_range(rand_state,
                                                      i,
                                                      config->world.y_lower,
                                                      config->world.y_upper
                                                      )
                                );
                nodes[i].vel = curand_uniform(&rand_state[i]) * config->max_velocity;
            }
        } else {
            float2 to_waypoint = make_float2(nodes[i].waypoint.x - nodes[i].pos.x,
                                             nodes[i].waypoint.y - nodes[i].pos.y
                                             );
            // NOTE(review): sqrt promotes to double; sqrtf would stay in float
            float d = sqrt(to_waypoint.x * to_waypoint.x
                           + to_waypoint.y * to_waypoint.y
                           );
            if (d <= nodes[i].vel) {
                // close enough: arrive and start pausing
                nodes[i].pos = nodes[i].waypoint;
                nodes[i].pause = config->pause;
            } else {
                // scale the direction vector to length vel
                to_waypoint.x *= nodes[i].vel / d;
                to_waypoint.y *= nodes[i].vel / d;
                float2 candidate = make_float2(nodes[i].pos.x + to_waypoint.x,
                                               nodes[i].pos.y + to_waypoint.y
                                               );
                if (random_waypoint_is_safe(nodes, grid, i, candidate)) {
                    // keep the grid in sync with the position change
                    if (grid_remove_node(grid, nodes[i].pos, i))
                        grid_add_node(grid, candidate, i);
                    nodes[i].pos = candidate;
                } else {
                    printf("can't move %d\n", i);
                }
            }
        }
    }
}
// random direction
// Init kernel for the random-direction model: seeds the per-thread RNG,
// places the node uniformly in the world, starts it paused for one
// step, and registers it in the grid.
__global__ void random_direction_init(curandState *rand_state,
                                      random_waypoint_config* config,
                                      random_direction_node* nodes,
                                      grid *grid,
                                      unsigned int num_nodes
                                      ) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < num_nodes) {
        curand_init(clock64(), i, 0, &rand_state[i]);
        nodes[i].pos = make_float2(random_float_in_range(rand_state,
                                                         i,
                                                         config->world.x_lower,
                                                         config->world.x_upper
                                                         ),
                                   random_float_in_range(rand_state,
                                                         i,
                                                         config->world.y_lower,
                                                         config->world.y_upper
                                                         )
                                   );
        nodes[i].direction = 0.0;
        nodes[i].vel = 0.0;
        nodes[i].pause = 1; // first step draws direction and velocity
        nodes[i].so = random_float_in_range(rand_state,
                                            i,
                                            config->min_so,
                                            config->max_so
                                            );
        linked_list *cell = grid_add_node(grid, nodes[i].pos, i); // cell unused
    }
}
// Intersection point of the infinite lines through p1-p2 and p3-p4
// (standard two-line determinant formula). No special handling for
// parallel lines: a zero denominator yields inf/nan components.
__device__ float2 cut(float2 p1, float2 p2, float2 p3, float2 p4) {
    float cross12 = p2.x * p1.y - p1.x * p2.y; // |p2 p1| determinant
    float cross34 = p4.x * p3.y - p3.x * p4.y; // |p4 p3| determinant
    float denom = (p4.y - p3.y) * (p2.x - p1.x)
                - (p2.y - p1.y) * (p4.x - p3.x);
    float x = ((p4.x - p3.x) * cross12 - (p2.x - p1.x) * cross34) / denom;
    float y = ((p1.y - p2.y) * cross34 - (p3.y - p4.y) * cross12) / denom;
    return make_float2(x, y);
}
// Squared Euclidean distance between two points (no sqrt).
__device__ float distance_squared(float2 p1, float2 p2) {
    float2 d = make_float2(p1.x - p2.x, p1.y - p2.y);
    return d.x * d.x + d.y * d.y;
}
// One simulation step of the random-direction model, one thread per
// node. Paused nodes count down and, when done, draw a new heading in
// [0, 2*pi) plus a velocity. Moving nodes step one unit along their
// heading; a candidate that leaves the world is clamped back onto the
// boundary via line/edge intersection (cut), choosing the nearer edge
// in the corner cases, and the node then pauses.
// NOTE(review): the __syncthreads() below sits inside divergent control
// flow (only non-paused threads with i < num_nodes reach it), which is
// undefined behaviour when threads of the same block diverge -- it
// should be hoisted out or removed. Left untouched here.
// NOTE(review): cos/sin/printf-heavy debug output -- this kernel is
// clearly in a debugging state.
__global__ void random_direction_step(curandState *rand_state,
                                      random_waypoint_config* config,
                                      random_direction_node* nodes,
                                      grid *grid,
                                      unsigned int num_nodes
                                      ) {
    printf("%d * %d + %d\n", blockDim.x, blockIdx.x, threadIdx.x);
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    printf("%d\n", i);
    // the four world corners, for boundary clamping below
    float2 top_left = make_float2(config->world.x_lower, config->world.y_lower);
    float2 bottom_left = make_float2(config->world.x_lower, config->world.y_upper);
    float2 top_right = make_float2(config->world.x_upper, config->world.y_lower);
    float2 bottom_right = make_float2(config->world.x_upper, config->world.y_upper);
    if (i < num_nodes) {
        printf("node %d\n", i);
        if (nodes[i].pause > 0) {
            if (--nodes[i].pause <= 0) {
                printf("%d: choose new direction\n", i);
                nodes[i].direction =
                    random_float_in_range(rand_state,
                                          i,
                                          0,
                                          2 * M_PI
                                          );
                nodes[i].vel = curand_uniform(&rand_state[i]) * config->max_velocity;
            }
        } else {
            // unit step along the current heading
            float2 forward = make_float2(cos(nodes[i].direction),
                                         sin(nodes[i].direction)
                                         );
            float2 candidate = make_float2(nodes[i].pos.x + forward.x,
                                           nodes[i].pos.y + forward.y
                                           );
            printf("candidate for %d: %f, %f\n", i, candidate.x, candidate.y);
            // out of bounds on which sides?
            bool oob_left = candidate.x < config->world.x_lower;
            bool oob_right = candidate.x > config->world.x_upper;
            bool oob_up = candidate.y < config->world.y_lower;
            bool oob_down = candidate.y > config->world.y_upper;
            if (oob_left) {
                if (oob_up) {
                    // corner: clamp to whichever edge intersection is nearer
                    float2 c1 = cut(nodes[i].pos,
                                    candidate,
                                    top_left,
                                    bottom_left
                                    );
                    float2 c2 = cut(nodes[i].pos,
                                    candidate,
                                    top_left,
                                    top_right
                                    );
                    float d1 = distance_squared(candidate, c1);
                    float d2 = distance_squared(candidate, c2);
                    if (d1 < d2)
                        candidate = c1;
                    else
                        candidate = c2;
                }
                else if (oob_down) {
                    float2 c1 = cut(nodes[i].pos,
                                    candidate,
                                    top_left,
                                    bottom_left
                                    );
                    float2 c2 = cut(nodes[i].pos,
                                    candidate,
                                    bottom_left,
                                    bottom_right
                                    );
                    float d1 = distance_squared(candidate, c1);
                    float d2 = distance_squared(candidate, c2);
                    if (d1 < d2)
                        candidate = c1;
                    else
                        candidate = c2;
                }
                else {
                    candidate = cut(nodes[i].pos,
                                    candidate,
                                    top_left,
                                    bottom_left
                                    );
                }
            } else if (oob_right) {
                if (oob_up) {
                    float2 c1 = cut(nodes[i].pos,
                                    candidate,
                                    top_right,
                                    bottom_right
                                    );
                    float2 c2 = cut(nodes[i].pos,
                                    candidate,
                                    top_left,
                                    top_right
                                    );
                    float d1 = distance_squared(candidate, c1);
                    float d2 = distance_squared(candidate, c2);
                    if (d1 < d2)
                        candidate = c1;
                    else
                        candidate = c2;
                }
                else if (oob_down) {
                    float2 c1 = cut(nodes[i].pos,
                                    candidate,
                                    top_right,
                                    bottom_right
                                    );
                    float2 c2 = cut(nodes[i].pos,
                                    candidate,
                                    bottom_left,
                                    bottom_right
                                    );
                    float d1 = distance_squared(candidate, c1);
                    float d2 = distance_squared(candidate, c2);
                    if (d1 < d2)
                        candidate = c1;
                    else
                        candidate = c2;
                }
                else {
                    candidate = cut(nodes[i].pos,
                                    candidate,
                                    top_right,
                                    bottom_right
                                    );
                }
            } else if (oob_up) {
                candidate = cut(nodes[i].pos,
                                candidate,
                                top_left,
                                top_right
                                );
            } else if (oob_down) {
                candidate = cut(nodes[i].pos,
                                candidate,
                                bottom_left,
                                bottom_right
                                );
            }
            __syncthreads(); // NOTE(review): divergent barrier, see header note
            if (random_direction_is_safe(nodes, grid, i, candidate)) {
                // keep the grid in sync with the position change
                if (grid_remove_node(grid, nodes[i].pos, i))
                    grid_add_node(grid, candidate, i);
                nodes[i].pos = candidate;
                if (oob_left || oob_right || oob_up || oob_down) {
                    // hit the boundary: pause before picking a new heading
                    nodes[i].pause = config->pause;
                    printf("clamped to %f, %f\n", candidate.x, candidate.y);
                }
            } else {
                printf("can't move %d\n", i);
            }
        }
    }
}
// Host driver: builds config/grid/node buffers, runs grid_init and
// random_waypoint_init, then num_frames simulation steps, copying the
// node state back and printing it after each frame. The commented-out
// lines switch the experiment to the random-direction model.
// Fixes: pointer printfs used %x (undefined for 64-bit pointers, now
// %p), the per-node print loop no longer shadows the frame counter,
// and the grid struct is freed on exit (its cell lists still leak).
// (The function's closing brace is on the following source line.)
int main(int argc, char **argv) {
    cudaError_t err;
    unsigned int num_threads = 32; // 1024;
    unsigned int num_blocks = 1;
    unsigned int num_nodes = num_threads * num_blocks;
    unsigned int num_frames = 10;
    random_waypoint_node *h_nodes;
    //random_direction_node *h_nodes;
    random_waypoint_config h_config;
    h_config.world.x_lower = -10.0;
    h_config.world.x_upper = 10.0;
    h_config.world.y_lower = -10.0;
    h_config.world.y_upper = 10.0;
    h_config.max_velocity = 0.5;
    h_config.pause = 2;
    h_config.min_so = 0.01;
    h_config.max_so = 0.1;
    // cell size: one neighbour scan must cover a maximal step + offsets
    float cell_size = h_config.max_so * 2 + h_config.max_velocity;
    unsigned int grid_cols =
        ceil((h_config.world.x_upper - h_config.world.x_lower) / cell_size);
    unsigned int grid_rows =
        ceil((h_config.world.y_upper - h_config.world.y_lower) / cell_size);
    printf("%d nodes\n", num_nodes);
    //printf("grid: %d x %d\n", grid_cols, grid_rows);
    random_waypoint_node *d_nodes;
    //random_direction_node *d_nodes;
    random_waypoint_config *d_config;
    grid *d_grid;
    curandState *d_rand_state;
    h_nodes = (random_waypoint_node *)
        malloc(num_nodes * sizeof(random_waypoint_node));
    cudaMalloc((void **) &d_nodes, num_nodes * sizeof(random_waypoint_node));
    //cudaMalloc((void **) &d_nodes, num_nodes * sizeof(random_direction_node));
    cudaMalloc((void **) &d_config, sizeof(random_waypoint_config));
    cudaMalloc((void **) &d_rand_state, num_threads * sizeof(curandState));
    cudaMemcpy(d_config,
               &h_config,
               sizeof(random_waypoint_config),
               cudaMemcpyHostToDevice
               );
    d_grid = grid_host_create(h_config.world.x_lower,
                              h_config.world.y_lower,
                              cell_size,
                              cell_size,
                              grid_cols,
                              grid_rows
                              );
    printf("d_grid %p\n", (void *) d_grid);
    printf("init\n");
    printf("grid_init\n");
    grid_init<<<num_blocks, num_threads>>>(d_grid,
                                           num_nodes
                                           );
    err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("ERR: %s\n", cudaGetErrorString(err));
    printf("random_waypoint_init\n");
    random_waypoint_init<<<num_blocks, num_threads>>>(d_rand_state,
    //random_direction_init<<<num_blocks, num_threads>>>(d_rand_state,
                                                      d_config,
                                                      d_nodes,
                                                      d_grid,
                                                      num_nodes
                                                      );
    err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("ERR: %s\n", cudaGetErrorString(err));
    printf("/init\n");
    for (unsigned int i = 0; i < num_frames; i++) {
        printf("\nframe %u\n", i);
        random_waypoint_step<<<num_blocks, num_threads>>>(d_rand_state,
        //random_direction_step<<<num_blocks, num_threads>>>(d_rand_state,
                                                          d_config,
                                                          d_nodes,
                                                          d_grid,
                                                          num_nodes
                                                          );
        err = cudaGetLastError();
        if (err != cudaSuccess)
            printf("ERR: %s\n", cudaGetErrorString(err));
        printf("memcpy %p (device) to %p (host)\n",
               (void *) d_nodes, (void *) h_nodes);
        // blocking copy doubles as the per-frame synchronisation point
        cudaMemcpy(h_nodes,
                   d_nodes,
                   num_nodes * sizeof(random_waypoint_node),
                   //num_nodes * sizeof(random_direction_node),
                   cudaMemcpyDeviceToHost
                   );
        err = cudaGetLastError();
        if (err != cudaSuccess)
            printf("ERR: %s\n", cudaGetErrorString(err));
        for (unsigned int j = 0; j < num_nodes; j++) {
            printf("%u: %f,%f (v = %f) -> %f, %f | p = %d\n",
                   j,
                   h_nodes[j].pos.x,
                   h_nodes[j].pos.y,
                   h_nodes[j].vel,
                   //h_nodes[j].direction,
                   h_nodes[j].waypoint.x,
                   h_nodes[j].waypoint.y,
                   h_nodes[j].pause
                   );
        }
    }
    cudaFree(d_rand_state);
    cudaFree(d_config);
    cudaFree(d_nodes);
    cudaFree(d_grid); // fix: grid struct itself was leaked (cells array still leaks)
    free(h_nodes);
    exit(EXIT_SUCCESS);
} | #include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
struct struct_linked_list_node;
typedef struct struct_linked_list_node {
int data;
struct struct_linked_list_node *next;
} linked_list_node;
typedef struct {
linked_list_node *head, *tail;
} linked_list;
__device__ bool _ll_is_marked(linked_list_node *n) {
return ((unsigned long long int) n & 1);
}
__device__ linked_list_node *_ll_marked(linked_list_node *n) {
return (linked_list_node *) ((unsigned long long int) n | 1);
}
__device__ linked_list_node *_ll_unmarked(linked_list_node *n) {
return (linked_list_node *) ((unsigned long long int) n & (~1));
}
__device__ linked_list_node *ll_create_node(int data) {
linked_list_node *lln = (linked_list_node *) malloc(sizeof(linked_list_node));
lln->data = data;
lln->next = NULL;
return lln;
}
__device__ linked_list_node *ll_first(linked_list *ll) {
return ll->head->next;
}
__device__ void ll_reset(linked_list *ll) {
ll->head = ll_create_node(INT_MIN);
ll->tail = ll_create_node(INT_MAX);
ll->head->next = ll->tail;
}
__device__ linked_list *ll_create() {
linked_list *ll = (linked_list *) malloc(sizeof(linked_list));
ll_reset(ll);
return ll;
}
__device__ void ll_print(linked_list *ll) {
linked_list_node *n = ll->head;
while (n) {
printf("%d @ ", n->data);
printf("%x\n", n);
n = _ll_unmarked(n->next);
}
}
__device__ void _ll_free_nodes(linked_list_node *from, linked_list_node *to) {
//*
from = _ll_unmarked(from);
while (from && from != to) {
linked_list_node *next = from->next;
free(from);
from = _ll_unmarked(next);
}
//*/
}
__device__ linked_list_node *ll_search(linked_list *ll,
int data,
linked_list_node **left_node
) {
linked_list_node *left_node_next, *right_node;
while (true) {
linked_list_node *t = ll->head;
linked_list_node *t_next = ll->head->next;
do {
if (!_ll_is_marked(t_next)) {
*left_node = t;
left_node_next = t_next;
}
t = _ll_unmarked(t_next);
if (t == ll->tail)
break;
t_next = t->next;
} while (_ll_is_marked(t_next) || (t->data < data));
right_node = t;
if (left_node_next == right_node)
if ((right_node != ll->tail) && _ll_is_marked(right_node->next))
continue;
else
return right_node;
unsigned long long int old = (unsigned long long int) left_node_next;
if (old == atomicCAS((unsigned long long int *) &((*left_node)->next),
old,
(unsigned long long int ) right_node
)
) {
_ll_free_nodes((linked_list_node *) old, right_node);
if ((right_node != ll->tail) && _ll_is_marked(right_node->next))
continue;
else
return right_node;
}
}
}
__device__ bool ll_insert(linked_list *ll, int data) {
linked_list_node *new_node = ll_create_node(data);
linked_list_node *right_node, *left_node;
while (true) {
right_node = ll_search(ll, data, &left_node);
if ((right_node != ll->tail) && (right_node->data == data))
return false;
new_node->next = right_node;
unsigned long long int old = (unsigned long long int) right_node;
if (old == atomicCAS((unsigned long long int *) &(left_node->next),
old,
(unsigned long long int ) new_node
)
)
return true;
}
}
__device__ bool ll_remove(linked_list *ll, int data) {
linked_list_node * right_node, *right_node_next, *left_node;
//printf("ll_remove(%d)\n", data);
while (true) {
right_node = ll_search(ll, data, &left_node);
//printf("ll_remove: found %d\n", right_node->data);
if ((right_node == ll->tail) || (right_node->data != data))
return false;
right_node_next = right_node->next;
if (!_ll_is_marked(right_node_next)) {
unsigned long long int old = (unsigned long long int) right_node_next;
if (old == atomicCAS((unsigned long long int *) &(right_node->next),
old,
(unsigned long long int ) _ll_marked(right_node_next)
)
)
break;
}
}
unsigned long long int old = (unsigned long long int) right_node;
if (old == atomicCAS((unsigned long long int *) &(left_node->next),
old,
(unsigned long long int ) right_node_next
)
)
_ll_free_nodes((linked_list_node *) old, right_node_next);
else
right_node = ll_search(ll, right_node->data, &left_node);
return true;
}
// call ONCE!
__device__ void ll_free(linked_list *ll) {
// todo
if (ll) {
linked_list_node *lln = ll->head;
while (lln) {
linked_list_node *next = lln->next;
free(lln);
lln = next;
}
free(ll);
ll = NULL;
}
}
__device__ void ll_free_safe(linked_list *ll) {
if (ll) {
linked_list_node *lln = _ll_unmarked(ll->head);
while (lln) {
linked_list_node *next = _ll_unmarked(lln->next);
free(lln);
lln = next;
}
free(ll);
ll = NULL;
}
}
__device__ void ll_to_array(linked_list *ll, int *arr, unsigned int l) {
linked_list_node *node = ll->head;
if (node) // skip head
node = node->next;
unsigned int i = 0;
while (node && i < l) {
node = node->next;
arr[i++] = node->data;
}
// fill remaining cells with 0, if any
while (i < l)
arr[i++] = 13373; // todo
}
__device__ void ll_to_array_safe(linked_list *ll, int *arr, unsigned int l) {
linked_list_node *node = ll->head;
linked_list_node *unmarked = _ll_unmarked(node);
if (node) { // skip head
node = node->next;
unmarked = _ll_unmarked(node);
}
unsigned int i = 0;
while (unmarked && i < l) {
if (!_ll_is_marked(unmarked->next))
arr[i++] = unmarked->data;
node = unmarked->next;
unmarked = _ll_unmarked(node);
}
while (i < l)
arr[i++] = 13373; // todo
}
// grid
typedef struct {
float lower_x, lower_y, cell_width, cell_height;
unsigned int cols, rows;
linked_list **cells;
} grid;
/*
__device__ grid *grid_create() {
grid *g = (grid *) malloc(sizeof(grid));
return g;
}
*/
grid *grid_host_create(float lower_x,
float lower_y,
float cell_width,
float cell_height,
unsigned int cols,
unsigned int rows
) {
grid *d_grid, h_grid;
hipMalloc((void **) &d_grid, sizeof(grid));
h_grid.lower_x = lower_x;
h_grid.lower_y = lower_y;
h_grid.cell_width = cell_width;
h_grid.cell_height = cell_height;
h_grid.cols = cols;
h_grid.rows = rows;
linked_list **cells;
hipMalloc((void **) &cells, rows * cols * sizeof(linked_list *));
h_grid.cells = cells;
printf("h_grid.cells %x\n", cells);
printf("h_grid.cell_height %f\n", h_grid.cell_height);
printf("h_grid.cell_width %f\n", h_grid.cell_width);
hipMemcpy(d_grid,
&h_grid,
sizeof(grid),
hipMemcpyHostToDevice
);
return d_grid;
}
__device__ void grid_alloc_cells(grid *g) {
unsigned int num_cells = g->rows * g->cols;
printf("num_cells %d\n", num_cells);
printf("grid.cells %x\n", g->cells);
for (unsigned int i = 0; i < num_cells; i++) {
g->cells[i] = ll_create();
//printf("created cell ll at %x\n", g->cells[i]);
}
}
__device__ linked_list *_grid_cell(grid *g, float2 pos) {
//printf("x %f\n", pos.x);
//printf("y %f\n", pos.y);
//printf("cell_width %f\n", g->cell_width);
//printf("cell_height %f\n", g->cell_height);
unsigned int col = floor((pos.x - g->lower_x) / g->cell_width);
unsigned int row = floor((pos.y - g->lower_y) / g->cell_height);
//printf("col %d\n", col);
//printf("row %d\n", row);
unsigned int i = row * (g->cols) + col;
if (col < g->cols && row < g->rows) {
//printf("_grid_cell: %x\n", g->cells[i]);
return g->cells[i];
} else {
printf("_grid_cell: NULL\n");
return NULL;
}
}
__device__ linked_list *grid_add_node(grid *g, float2 pos, int n) {
linked_list *cell = _grid_cell(g, pos);
/*
printf("not calling ll_insert\n");
return cell;
//*/
/*
printf("cell at %x\n", cell);
return cell;
*/
if (cell) {
//printf("calling ll_insert NOT\n");
ll_insert(cell, n);
}
return cell;
}
__device__ bool grid_remove_node(grid *g, float2 pos, int n) {
linked_list *cell = _grid_cell(g, pos);
if (cell)
return ll_remove(cell, n);
else
return false;
}
__device__ linked_list **grid_neighbours(grid *g, float2 pos) {
unsigned int col = floor((pos.x - g->lower_x) / g->cell_width);
unsigned int row = floor((pos.y - g->lower_y) / g->cell_height);
if (col >= g->cols || row >= g->rows) {
printf("this cell does not exist: col %d, row %d\n", col, row);
printf("no such cell, pos: %f, %f\n", pos.x, pos.y);
return NULL;
}
linked_list **neighbours = (linked_list **) malloc(9 * sizeof(linked_list *));
unsigned int i = 0;
neighbours[i++] = g->cells[row * g->cols + col];
unsigned int left_most = (col > 0) ? col - 1 : 0;
unsigned int right_most = (col + 1 < g->cols) ? col + 1 : col;
if (row > 0) {
for (unsigned int c = left_most; c <= right_most; c++) {
neighbours[i++] = g->cells[(row - 1) * g->cols + c];
}
}
if (left_most < col)
neighbours[i++] = g->cells[row * g->cols + left_most];
if (right_most > col)
neighbours[i++] = g->cells[row * g->cols + right_most];
if (row + 1 < g->rows) {
for (unsigned int c = left_most; c <= right_most; c++) {
neighbours[i++] = g->cells[(row + 1) * g->cols + c];
}
}
while (i < 9)
neighbours[i++] = NULL;
return neighbours;
}
typedef struct {
float2 pos;
float2 waypoint;
float vel;
int pause;
float so;
} random_waypoint_node;
typedef struct {
float2 pos;
float direction;
float vel;
int pause;
float so;
} random_direction_node;
typedef struct {
float x_lower;
float x_upper;
float y_lower;
float y_upper;
} world_aabb;
typedef struct {
world_aabb world;
float max_velocity;
float min_so, max_so;
unsigned int pause;
} random_waypoint_config;
__device__ float random_float_in_range(hiprandState *rand_state,
int i,
float lower,
float upper
) {
return hiprand_uniform(&rand_state[i]) * (upper - lower) + lower;
}
__device__ unsigned int random_int_in_range(hiprandState *rand_state,
int i,
unsigned int lower,
unsigned int upper
) {
return hiprand(&rand_state[i]) % (upper - lower) + lower;
}
__global__ void grid_init(grid *grid,
unsigned int num_nodes
) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i == 0) {
printf("grid_alloc_cells\n");
grid_alloc_cells(grid);
}
}
__global__ void random_waypoint_init(hiprandState *rand_state,
random_waypoint_config* config,
random_waypoint_node* nodes,
grid *grid,
unsigned int num_nodes
) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < num_nodes) {
hiprand_init(clock64(), i, 0, &rand_state[i]);
nodes[i].pos = make_float2(random_float_in_range(rand_state,
i,
config->world.x_lower,
config->world.x_upper
),
random_float_in_range(rand_state,
i,
config->world.y_lower,
config->world.y_upper
)
);
nodes[i].waypoint = nodes[i].pos;
nodes[i].vel = 0.0;
nodes[i].pause = 1;
nodes[i].so = random_float_in_range(rand_state,
i,
config->min_so,
config->max_so
);
grid_add_node(grid, nodes[i].pos, i);
printf("%d pos %f, %f\n", i, nodes[i].pos.x, nodes[i].pos.y);
}
}
__device__ bool random_waypoint_is_safe(random_waypoint_node *nodes,
grid *grid,
int i,
float2 pos
) {
linked_list **neighbours = grid_neighbours(grid, pos);
unsigned int count = 0;
for (unsigned int j = 0; j < 9; j++)
if (neighbours[j]) {
count++;
linked_list_node *n = ll_first(neighbours[j]);
while (n != neighbours[j]->tail) {
if (n->data != i) {
float dx = nodes[n->data].pos.x - pos.x;
float dy = nodes[n->data].pos.y - pos.y;
float dsq = dx * dx + dy * dy;
float safe_d = nodes[i].so + nodes[n->data].vel + nodes[n->data].so;
//printf("dsq %f, safe_d**2 %f\n", dsq, safe_d * safe_d);
if (dsq < (safe_d * safe_d)) {
/*
printf("neighbour at %f, %f is too close to %f, %f\n",
nodes[n->data].pos.x,
nodes[n->data].pos.y,
nodes[i].pos.x,
nodes[i].pos.y
);
*/
free(neighbours);
return false;
}
}
n = n->next;
}
}
//printf("%d neighbour regions, all safe\n", count);
free(neighbours);
return true;
}
__device__ bool random_direction_is_safe(random_direction_node *nodes,
grid *grid,
int i,
float2 pos
) {
linked_list **neighbours = grid_neighbours(grid, pos);
unsigned int count = 0;
for (unsigned int j = 0; j < 9; j++) {
//printf("neighbours %x\n", neighbours[j]);
if (neighbours[j]) {
count++;
linked_list_node *n = ll_first(neighbours[j]);
while (n && n != neighbours[j]->tail) {
//printf("neighbour %x\n", n);
while (_ll_is_marked(n->next)) {
//printf("skipping %x\n", n);
n = _ll_unmarked(n->next);
}
if (n) {
if (n->data != i) {
float dx = nodes[n->data].pos.x - pos.x;
float dy = nodes[n->data].pos.y - pos.y;
float dsq = dx * dx + dy * dy;
float safe_d = nodes[i].so + nodes[n->data].vel + nodes[n->data].so;
//printf("dsq %f, safe_d**2 %f\n", dsq, safe_d * safe_d);
if (dsq < (safe_d * safe_d)) {
/*
printf("neighbour at %f, %f is too close to %f, %f\n",
nodes[n->data].pos.x,
nodes[n->data].pos.y,
nodes[i].pos.x,
nodes[i].pos.y
);
*/
free(neighbours);
return false;
}
}
n = _ll_unmarked(n->next);
}
}
}
}
//printf("%d neighbour regions, all safe\n", count);
free(neighbours);
return true;
}
__global__ void random_waypoint_step(hiprandState *rand_state,
random_waypoint_config* config,
random_waypoint_node* nodes,
grid *grid,
unsigned int num_nodes
) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < num_nodes) {
//*
if (nodes[i].pause > 0) {
if (--nodes[i].pause <= 0) {
nodes[i].waypoint =
make_float2(random_float_in_range(rand_state,
i,
config->world.x_lower,
config->world.x_upper
),
random_float_in_range(rand_state,
i,
config->world.y_lower,
config->world.y_upper
)
);
nodes[i].vel = hiprand_uniform(&rand_state[i]) * config->max_velocity;
}
} else {
float2 to_waypoint = make_float2(nodes[i].waypoint.x - nodes[i].pos.x,
nodes[i].waypoint.y - nodes[i].pos.y
);
float d = sqrt(to_waypoint.x * to_waypoint.x
+ to_waypoint.y * to_waypoint.y
);
if (d <= nodes[i].vel) {
nodes[i].pos = nodes[i].waypoint;
nodes[i].pause = config->pause;
} else {
to_waypoint.x *= nodes[i].vel / d;
to_waypoint.y *= nodes[i].vel / d;
float2 candidate = make_float2(nodes[i].pos.x + to_waypoint.x,
nodes[i].pos.y + to_waypoint.y
);
if (random_waypoint_is_safe(nodes, grid, i, candidate)) {
//printf("moving %d\n", i);
if (grid_remove_node(grid, nodes[i].pos, i))
grid_add_node(grid, candidate, i);
nodes[i].pos = candidate;
} else {
printf("can't move %d\n", i);
}
}
}
//*/
}
}
// random direction
__global__ void random_direction_init(hiprandState *rand_state,
random_waypoint_config* config,
random_direction_node* nodes,
grid *grid,
unsigned int num_nodes
) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
//*
if (i < num_nodes) {
hiprand_init(clock64(), i, 0, &rand_state[i]);
nodes[i].pos = make_float2(random_float_in_range(rand_state,
i,
config->world.x_lower,
config->world.x_upper
),
random_float_in_range(rand_state,
i,
config->world.y_lower,
config->world.y_upper
)
);
nodes[i].direction = 0.0;
nodes[i].vel = 0.0;
nodes[i].pause = 1;
nodes[i].so = random_float_in_range(rand_state,
i,
config->min_so,
config->max_so
);
linked_list *cell = grid_add_node(grid, nodes[i].pos, i);
}
//*/
}
__device__ float2 cut(float2 p1, float2 p2, float2 p3, float2 p4) {
return make_float2((
(p4.x - p3.x) * (p2.x * p1.y - p1.x * p2.y)
- (p2.x - p1.x) * (p4.x * p3.y - p3.x * p4.y)
)
/
(
(p4.y - p3.y) * (p2.x - p1.x)
- (p2.y - p1.y) * (p4.x - p3.x)
),
(
(p1.y - p2.y) * (p4.x * p3.y - p3.x * p4.y)
- (p3.y - p4.y) * (p2.x * p1.y - p1.x * p2.y)
)
/
(
(p4.y - p3.y) * (p2.x - p1.x)
- (p2.y - p1.y) * (p4.x - p3.x)
)
);
}
__device__ float distance_squared(float2 p1, float2 p2) {
float dx = p1.x - p2.x;
float dy = p1.y - p2.y;
return dx * dx + dy * dy;
}
__global__ void random_direction_step(hiprandState *rand_state,
random_waypoint_config* config,
random_direction_node* nodes,
grid *grid,
unsigned int num_nodes
) {
printf("%d * %d + %d\n", blockDim.x, blockIdx.x, threadIdx.x);
int i = blockDim.x * blockIdx.x + threadIdx.x;
printf("%d\n", i);
float2 top_left = make_float2(config->world.x_lower, config->world.y_lower);
float2 bottom_left = make_float2(config->world.x_lower, config->world.y_upper);
float2 top_right = make_float2(config->world.x_upper, config->world.y_lower);
float2 bottom_right = make_float2(config->world.x_upper, config->world.y_upper);
//printf("tl %f, %f\n", top_left.x, top_left.y);
//printf("bl %f, %f\n", bottom_left.x, bottom_left.y);
//printf("tr %f, %f\n", top_right.x, top_right.y);
//printf("br %f, %f\n", bottom_right.x, bottom_right.y);
if (i < num_nodes) {
printf("node %d\n", i);
if (nodes[i].pause > 0) {
if (--nodes[i].pause <= 0) {
printf("%d: choose new direction\n", i);
nodes[i].direction =
random_float_in_range(rand_state,
i,
0,
2 * M_PI
);
nodes[i].vel = hiprand_uniform(&rand_state[i]) * config->max_velocity;
}
} else {
float2 forward = make_float2(cos(nodes[i].direction),
sin(nodes[i].direction)
);
float2 candidate = make_float2(nodes[i].pos.x + forward.x,
nodes[i].pos.y + forward.y
);
printf("candidate for %d: %f, %f\n", i, candidate.x, candidate.y);
// out of bounds?
bool oob_left = candidate.x < config->world.x_lower;
bool oob_right = candidate.x > config->world.x_upper;
bool oob_up = candidate.y < config->world.y_lower;
bool oob_down = candidate.y > config->world.y_upper;
//printf("%d: %f, %f | oob? l%d r%d u%d d%d\n",
// i,
// candidate.x,
// candidate.y,
// oob_left,
// oob_right,
// oob_up,
// oob_down
// );
if (oob_left) {
if (oob_up) {
float2 c1 = cut(nodes[i].pos,
candidate,
top_left,
bottom_left
);
float2 c2 = cut(nodes[i].pos,
candidate,
top_left,
top_right
);
float d1 = distance_squared(candidate, c1);
float d2 = distance_squared(candidate, c2);
if (d1 < d2)
candidate = c1;
else
candidate = c2;
}
else if (oob_down) {
float2 c1 = cut(nodes[i].pos,
candidate,
top_left,
bottom_left
);
float2 c2 = cut(nodes[i].pos,
candidate,
bottom_left,
bottom_right
);
float d1 = distance_squared(candidate, c1);
float d2 = distance_squared(candidate, c2);
if (d1 < d2)
candidate = c1;
else
candidate = c2;
}
else {
candidate = cut(nodes[i].pos,
candidate,
top_left,
bottom_left
);
}
} else if (oob_right) {
if (oob_up) {
float2 c1 = cut(nodes[i].pos,
candidate,
top_right,
bottom_right
);
float2 c2 = cut(nodes[i].pos,
candidate,
top_left,
top_right
);
float d1 = distance_squared(candidate, c1);
float d2 = distance_squared(candidate, c2);
if (d1 < d2)
candidate = c1;
else
candidate = c2;
}
else if (oob_down) {
float2 c1 = cut(nodes[i].pos,
candidate,
top_right,
bottom_right
);
float2 c2 = cut(nodes[i].pos,
candidate,
bottom_left,
bottom_right
);
float d1 = distance_squared(candidate, c1);
float d2 = distance_squared(candidate, c2);
if (d1 < d2)
candidate = c1;
else
candidate = c2;
}
else {
candidate = cut(nodes[i].pos,
candidate,
top_right,
bottom_right
);
}
} else if (oob_up) {
candidate = cut(nodes[i].pos,
candidate,
top_left,
top_right
);
} else if (oob_down) {
candidate = cut(nodes[i].pos,
candidate,
bottom_left,
bottom_right
);
}
__syncthreads();
if (random_direction_is_safe(nodes, grid, i, candidate)) {
//printf("moving %d\n", i);
if (grid_remove_node(grid, nodes[i].pos, i))
grid_add_node(grid, candidate, i);
nodes[i].pos = candidate;
if (oob_left || oob_right || oob_up || oob_down) {
nodes[i].pause = config->pause;
printf("clamped to %f, %f\n", candidate.x, candidate.y);
}
} else {
printf("can't move %d\n", i);
}
}
}
}
int main(int argc, char **argv) {
hipError_t err;
unsigned int num_threads = 32; // 1024;
unsigned int num_blocks = 1;
unsigned int num_nodes = num_threads * num_blocks;
unsigned int num_frames = 10;
random_waypoint_node *h_nodes;
//random_direction_node *h_nodes;
random_waypoint_config h_config;
h_config.world.x_lower = -10.0;
h_config.world.x_upper = 10.0;
h_config.world.y_lower = -10.0;
h_config.world.y_upper = 10.0;
h_config.max_velocity = 0.5;
h_config.pause = 2;
h_config.min_so = 0.01;
h_config.max_so = 0.1;
float cell_size = h_config.max_so * 2 + h_config.max_velocity;
unsigned int grid_cols =
ceil((h_config.world.x_upper - h_config.world.x_lower) / cell_size);
unsigned int grid_rows =
ceil((h_config.world.y_upper - h_config.world.y_lower) / cell_size);
printf("%d nodes\n", num_nodes);
//printf("grid: %d x %d\n", grid_cols, grid_rows);
random_waypoint_node *d_nodes;
//random_direction_node *d_nodes;
random_waypoint_config *d_config;
grid *d_grid;
hiprandState *d_rand_state;
h_nodes = (random_waypoint_node *)
malloc(num_nodes * sizeof(random_waypoint_node));
/*
h_nodes = (random_direction_node *)
malloc(num_nodes * sizeof(random_direction_node));
*/
hipMalloc((void **) &d_nodes, num_nodes * sizeof(random_waypoint_node));
//cudaMalloc((void **) &d_nodes, num_nodes * sizeof(random_direction_node));
hipMalloc((void **) &d_config, sizeof(random_waypoint_config));
hipMalloc((void **) &d_rand_state, num_threads * sizeof(hiprandState));
hipMemcpy(d_config,
&h_config,
sizeof(random_waypoint_config),
hipMemcpyHostToDevice
);
d_grid = grid_host_create(h_config.world.x_lower,
h_config.world.y_lower,
cell_size,
cell_size,
grid_cols,
grid_rows
);
printf("d_grid %x\n", d_grid);
printf("init\n");
printf("grid_init\n");
grid_init<<<num_blocks, num_threads>>>(d_grid,
num_nodes
);
err = hipGetLastError();
if (err != hipSuccess)
printf("ERR: %s\n", hipGetErrorString(err));
printf("random_waypoint_init\n");
random_waypoint_init<<<num_blocks, num_threads>>>(d_rand_state,
//random_direction_init<<<num_blocks, num_threads>>>(d_rand_state,
d_config,
d_nodes,
d_grid,
num_nodes
);
err = hipGetLastError();
if (err != hipSuccess)
printf("ERR: %s\n", hipGetErrorString(err));
printf("/init\n");
for (unsigned int i = 0; i < num_frames; i++) {
printf("\nframe %u\n", i);
//*
random_waypoint_step<<<num_blocks, num_threads>>>(d_rand_state,
//random_direction_step<<<num_blocks, num_threads>>>(d_rand_state,
d_config,
d_nodes,
d_grid,
num_nodes
);
err = hipGetLastError();
if (err != hipSuccess)
printf("ERR: %s\n", hipGetErrorString(err));
//*/
printf("memcpy %x (device) to %x (host)\n", d_nodes, h_nodes);
hipMemcpy(h_nodes,
d_nodes,
num_nodes * sizeof(random_waypoint_node),
//num_nodes * sizeof(random_direction_node),
hipMemcpyDeviceToHost
);
err = hipGetLastError();
if (err != hipSuccess)
printf("ERR: %s\n", hipGetErrorString(err));
for (unsigned int i = 0; i < num_nodes; i++) {
printf("%u: %f,%f (v = %f) -> %f, %f | p = %d\n",
i,
h_nodes[i].pos.x,
h_nodes[i].pos.y,
h_nodes[i].vel,
//h_nodes[i].direction,
h_nodes[i].waypoint.x,
h_nodes[i].waypoint.y,
h_nodes[i].pause
);
}
}
hipFree(d_rand_state);
hipFree(d_config);
hipFree(d_nodes);
free(h_nodes);
exit(EXIT_SUCCESS);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
struct struct_linked_list_node;
typedef struct struct_linked_list_node {
int data;
struct struct_linked_list_node *next;
} linked_list_node;
typedef struct {
linked_list_node *head, *tail;
} linked_list;
__device__ bool _ll_is_marked(linked_list_node *n) {
return ((unsigned long long int) n & 1);
}
__device__ linked_list_node *_ll_marked(linked_list_node *n) {
return (linked_list_node *) ((unsigned long long int) n | 1);
}
__device__ linked_list_node *_ll_unmarked(linked_list_node *n) {
return (linked_list_node *) ((unsigned long long int) n & (~1));
}
__device__ linked_list_node *ll_create_node(int data) {
linked_list_node *lln = (linked_list_node *) malloc(sizeof(linked_list_node));
lln->data = data;
lln->next = NULL;
return lln;
}
__device__ linked_list_node *ll_first(linked_list *ll) {
return ll->head->next;
}
__device__ void ll_reset(linked_list *ll) {
ll->head = ll_create_node(INT_MIN);
ll->tail = ll_create_node(INT_MAX);
ll->head->next = ll->tail;
}
__device__ linked_list *ll_create() {
linked_list *ll = (linked_list *) malloc(sizeof(linked_list));
ll_reset(ll);
return ll;
}
__device__ void ll_print(linked_list *ll) {
linked_list_node *n = ll->head;
while (n) {
printf("%d @ ", n->data);
printf("%x\n", n);
n = _ll_unmarked(n->next);
}
}
__device__ void _ll_free_nodes(linked_list_node *from, linked_list_node *to) {
//*
from = _ll_unmarked(from);
while (from && from != to) {
linked_list_node *next = from->next;
free(from);
from = _ll_unmarked(next);
}
//*/
}
__device__ linked_list_node *ll_search(linked_list *ll,
int data,
linked_list_node **left_node
) {
linked_list_node *left_node_next, *right_node;
while (true) {
linked_list_node *t = ll->head;
linked_list_node *t_next = ll->head->next;
do {
if (!_ll_is_marked(t_next)) {
*left_node = t;
left_node_next = t_next;
}
t = _ll_unmarked(t_next);
if (t == ll->tail)
break;
t_next = t->next;
} while (_ll_is_marked(t_next) || (t->data < data));
right_node = t;
if (left_node_next == right_node)
if ((right_node != ll->tail) && _ll_is_marked(right_node->next))
continue;
else
return right_node;
unsigned long long int old = (unsigned long long int) left_node_next;
if (old == atomicCAS((unsigned long long int *) &((*left_node)->next),
old,
(unsigned long long int ) right_node
)
) {
_ll_free_nodes((linked_list_node *) old, right_node);
if ((right_node != ll->tail) && _ll_is_marked(right_node->next))
continue;
else
return right_node;
}
}
}
__device__ bool ll_insert(linked_list *ll, int data) {
linked_list_node *new_node = ll_create_node(data);
linked_list_node *right_node, *left_node;
while (true) {
right_node = ll_search(ll, data, &left_node);
if ((right_node != ll->tail) && (right_node->data == data))
return false;
new_node->next = right_node;
unsigned long long int old = (unsigned long long int) right_node;
if (old == atomicCAS((unsigned long long int *) &(left_node->next),
old,
(unsigned long long int ) new_node
)
)
return true;
}
}
__device__ bool ll_remove(linked_list *ll, int data) {
linked_list_node * right_node, *right_node_next, *left_node;
//printf("ll_remove(%d)\n", data);
while (true) {
right_node = ll_search(ll, data, &left_node);
//printf("ll_remove: found %d\n", right_node->data);
if ((right_node == ll->tail) || (right_node->data != data))
return false;
right_node_next = right_node->next;
if (!_ll_is_marked(right_node_next)) {
unsigned long long int old = (unsigned long long int) right_node_next;
if (old == atomicCAS((unsigned long long int *) &(right_node->next),
old,
(unsigned long long int ) _ll_marked(right_node_next)
)
)
break;
}
}
unsigned long long int old = (unsigned long long int) right_node;
if (old == atomicCAS((unsigned long long int *) &(left_node->next),
old,
(unsigned long long int ) right_node_next
)
)
_ll_free_nodes((linked_list_node *) old, right_node_next);
else
right_node = ll_search(ll, right_node->data, &left_node);
return true;
}
// call ONCE!
__device__ void ll_free(linked_list *ll) {
// todo
if (ll) {
linked_list_node *lln = ll->head;
while (lln) {
linked_list_node *next = lln->next;
free(lln);
lln = next;
}
free(ll);
ll = NULL;
}
}
__device__ void ll_free_safe(linked_list *ll) {
if (ll) {
linked_list_node *lln = _ll_unmarked(ll->head);
while (lln) {
linked_list_node *next = _ll_unmarked(lln->next);
free(lln);
lln = next;
}
free(ll);
ll = NULL;
}
}
__device__ void ll_to_array(linked_list *ll, int *arr, unsigned int l) {
linked_list_node *node = ll->head;
if (node) // skip head
node = node->next;
unsigned int i = 0;
while (node && i < l) {
node = node->next;
arr[i++] = node->data;
}
// fill remaining cells with 0, if any
while (i < l)
arr[i++] = 13373; // todo
}
__device__ void ll_to_array_safe(linked_list *ll, int *arr, unsigned int l) {
linked_list_node *node = ll->head;
linked_list_node *unmarked = _ll_unmarked(node);
if (node) { // skip head
node = node->next;
unmarked = _ll_unmarked(node);
}
unsigned int i = 0;
while (unmarked && i < l) {
if (!_ll_is_marked(unmarked->next))
arr[i++] = unmarked->data;
node = unmarked->next;
unmarked = _ll_unmarked(node);
}
while (i < l)
arr[i++] = 13373; // todo
}
// grid
typedef struct {
float lower_x, lower_y, cell_width, cell_height;
unsigned int cols, rows;
linked_list **cells;
} grid;
/*
__device__ grid *grid_create() {
grid *g = (grid *) malloc(sizeof(grid));
return g;
}
*/
grid *grid_host_create(float lower_x,
float lower_y,
float cell_width,
float cell_height,
unsigned int cols,
unsigned int rows
) {
grid *d_grid, h_grid;
hipMalloc((void **) &d_grid, sizeof(grid));
h_grid.lower_x = lower_x;
h_grid.lower_y = lower_y;
h_grid.cell_width = cell_width;
h_grid.cell_height = cell_height;
h_grid.cols = cols;
h_grid.rows = rows;
linked_list **cells;
hipMalloc((void **) &cells, rows * cols * sizeof(linked_list *));
h_grid.cells = cells;
printf("h_grid.cells %x\n", cells);
printf("h_grid.cell_height %f\n", h_grid.cell_height);
printf("h_grid.cell_width %f\n", h_grid.cell_width);
hipMemcpy(d_grid,
&h_grid,
sizeof(grid),
hipMemcpyHostToDevice
);
return d_grid;
}
__device__ void grid_alloc_cells(grid *g) {
unsigned int num_cells = g->rows * g->cols;
printf("num_cells %d\n", num_cells);
printf("grid.cells %x\n", g->cells);
for (unsigned int i = 0; i < num_cells; i++) {
g->cells[i] = ll_create();
//printf("created cell ll at %x\n", g->cells[i]);
}
}
__device__ linked_list *_grid_cell(grid *g, float2 pos) {
//printf("x %f\n", pos.x);
//printf("y %f\n", pos.y);
//printf("cell_width %f\n", g->cell_width);
//printf("cell_height %f\n", g->cell_height);
unsigned int col = floor((pos.x - g->lower_x) / g->cell_width);
unsigned int row = floor((pos.y - g->lower_y) / g->cell_height);
//printf("col %d\n", col);
//printf("row %d\n", row);
unsigned int i = row * (g->cols) + col;
if (col < g->cols && row < g->rows) {
//printf("_grid_cell: %x\n", g->cells[i]);
return g->cells[i];
} else {
printf("_grid_cell: NULL\n");
return NULL;
}
}
__device__ linked_list *grid_add_node(grid *g, float2 pos, int n) {
linked_list *cell = _grid_cell(g, pos);
/*
printf("not calling ll_insert\n");
return cell;
//*/
/*
printf("cell at %x\n", cell);
return cell;
*/
if (cell) {
//printf("calling ll_insert NOT\n");
ll_insert(cell, n);
}
return cell;
}
__device__ bool grid_remove_node(grid *g, float2 pos, int n) {
linked_list *cell = _grid_cell(g, pos);
if (cell)
return ll_remove(cell, n);
else
return false;
}
__device__ linked_list **grid_neighbours(grid *g, float2 pos) {
unsigned int col = floor((pos.x - g->lower_x) / g->cell_width);
unsigned int row = floor((pos.y - g->lower_y) / g->cell_height);
if (col >= g->cols || row >= g->rows) {
printf("this cell does not exist: col %d, row %d\n", col, row);
printf("no such cell, pos: %f, %f\n", pos.x, pos.y);
return NULL;
}
linked_list **neighbours = (linked_list **) malloc(9 * sizeof(linked_list *));
unsigned int i = 0;
neighbours[i++] = g->cells[row * g->cols + col];
unsigned int left_most = (col > 0) ? col - 1 : 0;
unsigned int right_most = (col + 1 < g->cols) ? col + 1 : col;
if (row > 0) {
for (unsigned int c = left_most; c <= right_most; c++) {
neighbours[i++] = g->cells[(row - 1) * g->cols + c];
}
}
if (left_most < col)
neighbours[i++] = g->cells[row * g->cols + left_most];
if (right_most > col)
neighbours[i++] = g->cells[row * g->cols + right_most];
if (row + 1 < g->rows) {
for (unsigned int c = left_most; c <= right_most; c++) {
neighbours[i++] = g->cells[(row + 1) * g->cols + c];
}
}
while (i < 9)
neighbours[i++] = NULL;
return neighbours;
}
typedef struct {
float2 pos;
float2 waypoint;
float vel;
int pause;
float so;
} random_waypoint_node;
typedef struct {
float2 pos;
float direction;
float vel;
int pause;
float so;
} random_direction_node;
typedef struct {
float x_lower;
float x_upper;
float y_lower;
float y_upper;
} world_aabb;
typedef struct {
world_aabb world;
float max_velocity;
float min_so, max_so;
unsigned int pause;
} random_waypoint_config;
__device__ float random_float_in_range(hiprandState *rand_state,
int i,
float lower,
float upper
) {
return hiprand_uniform(&rand_state[i]) * (upper - lower) + lower;
}
__device__ unsigned int random_int_in_range(hiprandState *rand_state,
int i,
unsigned int lower,
unsigned int upper
) {
return hiprand(&rand_state[i]) % (upper - lower) + lower;
}
__global__ void grid_init(grid *grid,
unsigned int num_nodes
) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i == 0) {
printf("grid_alloc_cells\n");
grid_alloc_cells(grid);
}
}
__global__ void random_waypoint_init(hiprandState *rand_state,
random_waypoint_config* config,
random_waypoint_node* nodes,
grid *grid,
unsigned int num_nodes
) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < num_nodes) {
hiprand_init(clock64(), i, 0, &rand_state[i]);
nodes[i].pos = make_float2(random_float_in_range(rand_state,
i,
config->world.x_lower,
config->world.x_upper
),
random_float_in_range(rand_state,
i,
config->world.y_lower,
config->world.y_upper
)
);
nodes[i].waypoint = nodes[i].pos;
nodes[i].vel = 0.0;
nodes[i].pause = 1;
nodes[i].so = random_float_in_range(rand_state,
i,
config->min_so,
config->max_so
);
grid_add_node(grid, nodes[i].pos, i);
printf("%d pos %f, %f\n", i, nodes[i].pos.x, nodes[i].pos.y);
}
}
// Return true when no other node lies within the combined safety distance
// (nodes[i].so + neighbour's vel + neighbour's so) of `pos`.  Scans the 3x3
// neighbourhood of grid cells returned by grid_neighbours, and frees that
// array on every exit path.
__device__ bool random_waypoint_is_safe(random_waypoint_node *nodes,
                                        grid *grid,
                                        int i,
                                        float2 pos
                                        ) {
  linked_list **neighbours = grid_neighbours(grid, pos);
  for (unsigned int j = 0; j < 9; j++)
    if (neighbours[j]) {
      linked_list_node *n = ll_first(neighbours[j]);
      // Guard against a NULL list head, matching random_direction_is_safe;
      // the original dereferenced n unconditionally.
      while (n && n != neighbours[j]->tail) {
        if (n->data != i) {
          // Compare squared distance against squared safety radius to avoid
          // a sqrt.
          float dx = nodes[n->data].pos.x - pos.x;
          float dy = nodes[n->data].pos.y - pos.y;
          float dsq = dx * dx + dy * dy;
          float safe_d = nodes[i].so + nodes[n->data].vel + nodes[n->data].so;
          if (dsq < (safe_d * safe_d)) {
            free(neighbours);
            return false;
          }
        }
        n = n->next;
      }
    }
  free(neighbours);
  return true;
}
// Return true when no other node lies within the combined safety distance
// (nodes[i].so + neighbour's vel + neighbour's so) of `pos`, scanning the
// 3x3 neighbourhood of grid cells around `pos`.
// The cell lists appear to use mark-based deletion: a node whose successor
// pointer tests _ll_is_marked is treated as logically removed and skipped
// via _ll_unmarked.  Frees the array from grid_neighbours on every path.
__device__ bool random_direction_is_safe(random_direction_node *nodes,
                                         grid *grid,
                                         int i,
                                         float2 pos
                                         ) {
  linked_list **neighbours = grid_neighbours(grid, pos);
  unsigned int count = 0;
  for (unsigned int j = 0; j < 9; j++) {
    //printf("neighbours %x\n", neighbours[j]);
    if (neighbours[j]) {
      count++;
      linked_list_node *n = ll_first(neighbours[j]);
      while (n && n != neighbours[j]->tail) {
        //printf("neighbour %x\n", n);
        // Skip over logically-deleted (marked) nodes.
        while (_ll_is_marked(n->next)) {
          //printf("skipping %x\n", n);
          n = _ll_unmarked(n->next);
        }
        // NOTE(review): n can seemingly only be NULL here if _ll_unmarked
        // may return NULL — confirm whether this guard is load-bearing.
        if (n) {
          if (n->data != i) {
            // Squared distance vs squared safety radius (avoids sqrt).
            float dx = nodes[n->data].pos.x - pos.x;
            float dy = nodes[n->data].pos.y - pos.y;
            float dsq = dx * dx + dy * dy;
            float safe_d = nodes[i].so + nodes[n->data].vel + nodes[n->data].so;
            //printf("dsq %f, safe_d**2 %f\n", dsq, safe_d * safe_d);
            if (dsq < (safe_d * safe_d)) {
              /*
              printf("neighbour at %f, %f is too close to %f, %f\n",
                     nodes[n->data].pos.x,
                     nodes[n->data].pos.y,
                     nodes[i].pos.x,
                     nodes[i].pos.y
                     );
              */
              free(neighbours);
              return false;
            }
          }
          n = _ll_unmarked(n->next);
        }
      }
    }
  }
  //printf("%d neighbour regions, all safe\n", count);
  free(neighbours);
  return true;
}
// Kernel: advance each node one frame of the random-waypoint model, one
// thread per node.  Paused nodes count down and, when the pause expires,
// draw a fresh waypoint and speed.  Moving nodes step toward their waypoint,
// snapping onto it (and pausing) once within one step's reach, and only
// commit the move when the candidate position is collision-free.
__global__ void random_waypoint_step(hiprandState *rand_state,
                                     random_waypoint_config* config,
                                     random_waypoint_node* nodes,
                                     grid *grid,
                                     unsigned int num_nodes
                                     ) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i < num_nodes) {
    if (nodes[i].pause > 0) {
      if (--nodes[i].pause <= 0) {
        // Pause just expired: pick a new random waypoint and speed.
        nodes[i].waypoint =
          make_float2(random_float_in_range(rand_state,
                                            i,
                                            config->world.x_lower,
                                            config->world.x_upper
                                            ),
                      random_float_in_range(rand_state,
                                            i,
                                            config->world.y_lower,
                                            config->world.y_upper
                                            )
                      );
        nodes[i].vel = hiprand_uniform(&rand_state[i]) * config->max_velocity;
      }
    } else {
      float2 to_waypoint = make_float2(nodes[i].waypoint.x - nodes[i].pos.x,
                                       nodes[i].waypoint.y - nodes[i].pos.y
                                       );
      // sqrtf: the original used the double-precision sqrt() on a float
      // expression, forcing a pointless double round trip in device code.
      float d = sqrtf(to_waypoint.x * to_waypoint.x
                      + to_waypoint.y * to_waypoint.y
                      );
      if (d <= nodes[i].vel) {
        // Within one step of the waypoint: arrive exactly and start pausing.
        nodes[i].pos = nodes[i].waypoint;
        nodes[i].pause = config->pause;
      } else {
        // Scale the direction vector to one step's length.
        to_waypoint.x *= nodes[i].vel / d;
        to_waypoint.y *= nodes[i].vel / d;
        float2 candidate = make_float2(nodes[i].pos.x + to_waypoint.x,
                                       nodes[i].pos.y + to_waypoint.y
                                       );
        if (random_waypoint_is_safe(nodes, grid, i, candidate)) {
          // Re-home the node in the spatial grid, then commit the move.
          if (grid_remove_node(grid, nodes[i].pos, i))
            grid_add_node(grid, candidate, i);
          nodes[i].pos = candidate;
        } else {
          printf("can't move %d\n", i);
        }
      }
    }
  }
}
// random direction
// Kernel: initialise per-node RNG state and starting state for the
// random-direction model, one thread per node.
__global__ void random_direction_init(hiprandState *rand_state,
                                      random_waypoint_config* config,
                                      random_direction_node* nodes,
                                      grid *grid,
                                      unsigned int num_nodes
                                      ) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i < num_nodes) {
    // NOTE(review): seeding from clock64() makes runs non-reproducible;
    // use a fixed seed to replay a simulation.
    hiprand_init(clock64(), i, 0, &rand_state[i]);
    // Random starting position anywhere inside the world AABB.
    nodes[i].pos = make_float2(random_float_in_range(rand_state,
                                                     i,
                                                     config->world.x_lower,
                                                     config->world.x_upper
                                                     ),
                               random_float_in_range(rand_state,
                                                     i,
                                                     config->world.y_lower,
                                                     config->world.y_upper
                                                     )
                               );
    // One frame of pause so the first step draws a heading and speed.
    nodes[i].direction = 0.0;
    nodes[i].vel = 0.0;
    nodes[i].pause = 1;
    nodes[i].so = random_float_in_range(rand_state,
                                        i,
                                        config->min_so,
                                        config->max_so
                                        );
    // The original stored grid_add_node's return in an unused local `cell`;
    // the value is not needed here.
    grid_add_node(grid, nodes[i].pos, i);
  }
}
// Intersection point of the infinite lines through (p1, p2) and (p3, p4),
// using the standard two-line determinant formula.  The original evaluated
// the shared denominator and both endpoint cross products twice; they are
// hoisted here (identical floating-point results, half the work).
// NOTE(review): there is no guard for parallel lines — the denominator is 0
// and the result is inf/nan.  Callers only invoke this after an
// out-of-bounds test that presumably guarantees the lines cross; confirm.
__device__ float2 cut(float2 p1, float2 p2, float2 p3, float2 p4) {
  // 2D cross products of each line's endpoints.
  float c12 = p2.x * p1.y - p1.x * p2.y;
  float c34 = p4.x * p3.y - p3.x * p4.y;
  float denom = (p4.y - p3.y) * (p2.x - p1.x)
              - (p2.y - p1.y) * (p4.x - p3.x);
  return make_float2(((p4.x - p3.x) * c12 - (p2.x - p1.x) * c34) / denom,
                     ((p1.y - p2.y) * c34 - (p3.y - p4.y) * c12) / denom
                     );
}
// Squared Euclidean distance between p1 and p2 (no square root taken).
__device__ float distance_squared(float2 p1, float2 p2) {
  float2 d = make_float2(p1.x - p2.x, p1.y - p2.y);
  return d.x * d.x + d.y * d.y;
}
// Kernel: advance each node one frame of the random-direction model, one
// thread per node.  Paused nodes count down and then draw a heading in
// [0, 2*pi) and a speed; moving nodes step one unit along their heading,
// clip the step against whichever world edge(s) the candidate crosses
// (pausing when they hit a wall), and commit the move only when the
// candidate position is collision-free.
__global__ void random_direction_step(hiprandState *rand_state,
                                      random_waypoint_config* config,
                                      random_direction_node* nodes,
                                      grid *grid,
                                      unsigned int num_nodes
                                      ) {
  printf("%d * %d + %d\n", blockDim.x, blockIdx.x, threadIdx.x);
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  printf("%d\n", i);
  // World corners, used as segment endpoints for the boundary cuts below.
  float2 top_left = make_float2(config->world.x_lower, config->world.y_lower);
  float2 bottom_left = make_float2(config->world.x_lower, config->world.y_upper);
  float2 top_right = make_float2(config->world.x_upper, config->world.y_lower);
  float2 bottom_right = make_float2(config->world.x_upper, config->world.y_upper);
  //printf("tl %f, %f\n", top_left.x, top_left.y);
  //printf("bl %f, %f\n", bottom_left.x, bottom_left.y);
  //printf("tr %f, %f\n", top_right.x, top_right.y);
  //printf("br %f, %f\n", bottom_right.x, bottom_right.y);
  if (i < num_nodes) {
    printf("node %d\n", i);
    if (nodes[i].pause > 0) {
      if (--nodes[i].pause <= 0) {
        printf("%d: choose new direction\n", i);
        nodes[i].direction =
          random_float_in_range(rand_state,
                                i,
                                0,
                                2 * M_PI
                                );
        nodes[i].vel = hiprand_uniform(&rand_state[i]) * config->max_velocity;
      }
    } else {
      // Unit step along the heading.
      // NOTE(review): cos/sin here are the double-precision overloads on a
      // float argument — cosf/sinf would avoid the double round trip.
      float2 forward = make_float2(cos(nodes[i].direction),
                                   sin(nodes[i].direction)
                                   );
      float2 candidate = make_float2(nodes[i].pos.x + forward.x,
                                     nodes[i].pos.y + forward.y
                                     );
      printf("candidate for %d: %f, %f\n", i, candidate.x, candidate.y);
      // Which world edges would the candidate cross?
      bool oob_left = candidate.x < config->world.x_lower;
      bool oob_right = candidate.x > config->world.x_upper;
      bool oob_up = candidate.y < config->world.y_lower;
      bool oob_down = candidate.y > config->world.y_upper;
      //printf("%d: %f, %f | oob? l%d r%d u%d d%d\n",
      //       i,
      //       candidate.x,
      //       candidate.y,
      //       oob_left,
      //       oob_right,
      //       oob_up,
      //       oob_down
      //      );
      // Clip the movement segment against the crossed edge; when two edges
      // are crossed (corner case) keep whichever cut point lies closer to
      // the candidate.
      if (oob_left) {
        if (oob_up) {
          float2 c1 = cut(nodes[i].pos,
                          candidate,
                          top_left,
                          bottom_left
                          );
          float2 c2 = cut(nodes[i].pos,
                          candidate,
                          top_left,
                          top_right
                          );
          float d1 = distance_squared(candidate, c1);
          float d2 = distance_squared(candidate, c2);
          if (d1 < d2)
            candidate = c1;
          else
            candidate = c2;
        }
        else if (oob_down) {
          float2 c1 = cut(nodes[i].pos,
                          candidate,
                          top_left,
                          bottom_left
                          );
          float2 c2 = cut(nodes[i].pos,
                          candidate,
                          bottom_left,
                          bottom_right
                          );
          float d1 = distance_squared(candidate, c1);
          float d2 = distance_squared(candidate, c2);
          if (d1 < d2)
            candidate = c1;
          else
            candidate = c2;
        }
        else {
          candidate = cut(nodes[i].pos,
                          candidate,
                          top_left,
                          bottom_left
                          );
        }
      } else if (oob_right) {
        if (oob_up) {
          float2 c1 = cut(nodes[i].pos,
                          candidate,
                          top_right,
                          bottom_right
                          );
          float2 c2 = cut(nodes[i].pos,
                          candidate,
                          top_left,
                          top_right
                          );
          float d1 = distance_squared(candidate, c1);
          float d2 = distance_squared(candidate, c2);
          if (d1 < d2)
            candidate = c1;
          else
            candidate = c2;
        }
        else if (oob_down) {
          float2 c1 = cut(nodes[i].pos,
                          candidate,
                          top_right,
                          bottom_right
                          );
          float2 c2 = cut(nodes[i].pos,
                          candidate,
                          bottom_left,
                          bottom_right
                          );
          float d1 = distance_squared(candidate, c1);
          float d2 = distance_squared(candidate, c2);
          if (d1 < d2)
            candidate = c1;
          else
            candidate = c2;
        }
        else {
          candidate = cut(nodes[i].pos,
                          candidate,
                          top_right,
                          bottom_right
                          );
        }
      } else if (oob_up) {
        candidate = cut(nodes[i].pos,
                        candidate,
                        top_left,
                        top_right
                        );
      } else if (oob_down) {
        candidate = cut(nodes[i].pos,
                        candidate,
                        bottom_left,
                        bottom_right
                        );
      }
      // NOTE(review): __syncthreads() inside divergent control flow —
      // paused threads and threads with i >= num_nodes never reach this
      // barrier, which is undefined behaviour and can hang the block.
      __syncthreads();
      if (random_direction_is_safe(nodes, grid, i, candidate)) {
        //printf("moving %d\n", i);
        if (grid_remove_node(grid, nodes[i].pos, i))
          grid_add_node(grid, candidate, i);
        nodes[i].pos = candidate;
        if (oob_left || oob_right || oob_up || oob_down) {
          // Hitting a wall pauses the node so it later picks a new heading.
          nodes[i].pause = config->pause;
          printf("clamped to %f, %f\n", candidate.x, candidate.y);
        }
      } else {
        printf("can't move %d\n", i);
      }
    }
  }
}
// Host driver: configures a random-waypoint simulation (world bounds, node
// buffers, RNG state, spatial grid), runs the init kernels, then steps the
// simulation for num_frames frames, copying node state back to the host and
// printing it after each frame.  Commented-out lines are the switches for
// running the random-direction model instead.
int main(int argc, char **argv) {
  hipError_t err;
  unsigned int num_threads = 32; // 1024;
  unsigned int num_blocks = 1;
  unsigned int num_nodes = num_threads * num_blocks;
  unsigned int num_frames = 10;
  random_waypoint_node *h_nodes;
  //random_direction_node *h_nodes;
  random_waypoint_config h_config;
  // Simulation world: a 20x20 box centred on the origin.
  h_config.world.x_lower = -10.0;
  h_config.world.x_upper = 10.0;
  h_config.world.y_lower = -10.0;
  h_config.world.y_upper = 10.0;
  h_config.max_velocity = 0.5;
  h_config.pause = 2;
  h_config.min_so = 0.01;
  h_config.max_so = 0.1;
  // Cell size covers one max step plus two max safety offsets, so
  // interacting nodes always fall in adjacent grid cells.
  float cell_size = h_config.max_so * 2 + h_config.max_velocity;
  unsigned int grid_cols =
    ceil((h_config.world.x_upper - h_config.world.x_lower) / cell_size);
  unsigned int grid_rows =
    ceil((h_config.world.y_upper - h_config.world.y_lower) / cell_size);
  printf("%d nodes\n", num_nodes);
  //printf("grid: %d x %d\n", grid_cols, grid_rows);
  random_waypoint_node *d_nodes;
  //random_direction_node *d_nodes;
  random_waypoint_config *d_config;
  grid *d_grid;
  hiprandState *d_rand_state;
  h_nodes = (random_waypoint_node *)
    malloc(num_nodes * sizeof(random_waypoint_node));
  /*
  h_nodes = (random_direction_node *)
    malloc(num_nodes * sizeof(random_direction_node));
  */
  hipMalloc((void **) &d_nodes, num_nodes * sizeof(random_waypoint_node));
  //hipMalloc((void **) &d_nodes, num_nodes * sizeof(random_direction_node));
  hipMalloc((void **) &d_config, sizeof(random_waypoint_config));
  hipMalloc((void **) &d_rand_state, num_threads * sizeof(hiprandState));
  hipMemcpy(d_config,
            &h_config,
            sizeof(random_waypoint_config),
            hipMemcpyHostToDevice
            );
  d_grid = grid_host_create(h_config.world.x_lower,
                            h_config.world.y_lower,
                            cell_size,
                            cell_size,
                            grid_cols,
                            grid_rows
                            );
  // %p with a void* cast is the portable pointer format; the original used
  // %x, which truncates 64-bit pointers and is undefined behaviour.
  printf("d_grid %p\n", (void *) d_grid);
  printf("init\n");
  printf("grid_init\n");
  grid_init<<<num_blocks, num_threads>>>(d_grid,
                                         num_nodes
                                         );
  err = hipGetLastError();
  if (err != hipSuccess)
    printf("ERR: %s\n", hipGetErrorString(err));
  printf("random_waypoint_init\n");
  random_waypoint_init<<<num_blocks, num_threads>>>(d_rand_state,
  //random_direction_init<<<num_blocks, num_threads>>>(d_rand_state,
                                                    d_config,
                                                    d_nodes,
                                                    d_grid,
                                                    num_nodes
                                                    );
  err = hipGetLastError();
  if (err != hipSuccess)
    printf("ERR: %s\n", hipGetErrorString(err));
  printf("/init\n");
  for (unsigned int frame = 0; frame < num_frames; frame++) {
    printf("\nframe %u\n", frame);
    random_waypoint_step<<<num_blocks, num_threads>>>(d_rand_state,
    //random_direction_step<<<num_blocks, num_threads>>>(d_rand_state,
                                                      d_config,
                                                      d_nodes,
                                                      d_grid,
                                                      num_nodes
                                                      );
    err = hipGetLastError();
    if (err != hipSuccess)
      printf("ERR: %s\n", hipGetErrorString(err));
    printf("memcpy %p (device) to %p (host)\n",
           (void *) d_nodes,
           (void *) h_nodes
           );
    // Blocking copy: also synchronises with the step kernel above.
    hipMemcpy(h_nodes,
              d_nodes,
              num_nodes * sizeof(random_waypoint_node),
              //num_nodes * sizeof(random_direction_node),
              hipMemcpyDeviceToHost
              );
    err = hipGetLastError();
    if (err != hipSuccess)
      printf("ERR: %s\n", hipGetErrorString(err));
    // Renamed from `i` (the original shadowed the frame counter).
    for (unsigned int n = 0; n < num_nodes; n++) {
      printf("%u: %f,%f (v = %f) -> %f, %f | p = %d\n",
             n,
             h_nodes[n].pos.x,
             h_nodes[n].pos.y,
             h_nodes[n].vel,
             //h_nodes[n].direction,
             h_nodes[n].waypoint.x,
             h_nodes[n].waypoint.y,
             h_nodes[n].pause
             );
    }
  }
  hipFree(d_rand_state);
  hipFree(d_config);
  hipFree(d_nodes);
  // Frees the grid struct itself; its cell array (allocated inside
  // grid_host_create) still leaks — no grid_host_destroy helper is visible.
  hipFree(d_grid);
  free(h_nodes);
  exit(EXIT_SUCCESS);
}
.file "main.hip"
.globl _Z16grid_host_createffffjj # -- Begin function _Z16grid_host_createffffjj
.p2align 4, 0x90
.type _Z16grid_host_createffffjj,@function
_Z16grid_host_createffffjj: # @_Z16grid_host_createffffjj
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $72, %rsp
.cfi_def_cfa_offset 96
.cfi_offset %rbx, -24
.cfi_offset %rbp, -16
movl %esi, %ebx
movl %edi, %ebp
movss %xmm3, 20(%rsp) # 4-byte Spill
movss %xmm2, 16(%rsp) # 4-byte Spill
movss %xmm1, 12(%rsp) # 4-byte Spill
movss %xmm0, 8(%rsp) # 4-byte Spill
leaq 24(%rsp), %rdi
movl $32, %esi
callq hipMalloc
movss 8(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
movss %xmm0, 32(%rsp)
movss 12(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
movss %xmm0, 36(%rsp)
movss 16(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
movss %xmm0, 40(%rsp)
movss 20(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
movss %xmm0, 44(%rsp)
movl %ebp, 48(%rsp)
movl %ebx, 52(%rsp)
imull %ebp, %ebx
shlq $3, %rbx
leaq 64(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
movq 64(%rsp), %rsi
movq %rsi, 56(%rsp)
movl $.L.str, %edi
xorl %eax, %eax
callq printf
movss 44(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.1, %edi
movb $1, %al
callq printf
movss 40(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.2, %edi
movb $1, %al
callq printf
movq 24(%rsp), %rdi
leaq 32(%rsp), %rsi
movl $32, %edx
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rax
addq $72, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z16grid_host_createffffjj, .Lfunc_end0-_Z16grid_host_createffffjj
.cfi_endproc
# -- End function
.globl _Z24__device_stub__grid_initP4gridj # -- Begin function _Z24__device_stub__grid_initP4gridj
.p2align 4, 0x90
.type _Z24__device_stub__grid_initP4gridj,@function
_Z24__device_stub__grid_initP4gridj: # @_Z24__device_stub__grid_initP4gridj
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z9grid_initP4gridj, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end1:
.size _Z24__device_stub__grid_initP4gridj, .Lfunc_end1-_Z24__device_stub__grid_initP4gridj
.cfi_endproc
# -- End function
.globl _Z35__device_stub__random_waypoint_initP12hiprandStateP22random_waypoint_configP20random_waypoint_nodeP4gridj # -- Begin function _Z35__device_stub__random_waypoint_initP12hiprandStateP22random_waypoint_configP20random_waypoint_nodeP4gridj
.p2align 4, 0x90
.type _Z35__device_stub__random_waypoint_initP12hiprandStateP22random_waypoint_configP20random_waypoint_nodeP4gridj,@function
_Z35__device_stub__random_waypoint_initP12hiprandStateP22random_waypoint_configP20random_waypoint_nodeP4gridj: # @_Z35__device_stub__random_waypoint_initP12hiprandStateP22random_waypoint_configP20random_waypoint_nodeP4gridj
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z20random_waypoint_initP12hiprandStateP22random_waypoint_configP20random_waypoint_nodeP4gridj, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $152, %rsp
.cfi_adjust_cfa_offset -152
retq
.Lfunc_end2:
.size _Z35__device_stub__random_waypoint_initP12hiprandStateP22random_waypoint_configP20random_waypoint_nodeP4gridj, .Lfunc_end2-_Z35__device_stub__random_waypoint_initP12hiprandStateP22random_waypoint_configP20random_waypoint_nodeP4gridj
.cfi_endproc
# -- End function
.globl _Z35__device_stub__random_waypoint_stepP12hiprandStateP22random_waypoint_configP20random_waypoint_nodeP4gridj # -- Begin function _Z35__device_stub__random_waypoint_stepP12hiprandStateP22random_waypoint_configP20random_waypoint_nodeP4gridj
.p2align 4, 0x90
.type _Z35__device_stub__random_waypoint_stepP12hiprandStateP22random_waypoint_configP20random_waypoint_nodeP4gridj,@function
_Z35__device_stub__random_waypoint_stepP12hiprandStateP22random_waypoint_configP20random_waypoint_nodeP4gridj: # @_Z35__device_stub__random_waypoint_stepP12hiprandStateP22random_waypoint_configP20random_waypoint_nodeP4gridj
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z20random_waypoint_stepP12hiprandStateP22random_waypoint_configP20random_waypoint_nodeP4gridj, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $152, %rsp
.cfi_adjust_cfa_offset -152
retq
.Lfunc_end3:
.size _Z35__device_stub__random_waypoint_stepP12hiprandStateP22random_waypoint_configP20random_waypoint_nodeP4gridj, .Lfunc_end3-_Z35__device_stub__random_waypoint_stepP12hiprandStateP22random_waypoint_configP20random_waypoint_nodeP4gridj
.cfi_endproc
# -- End function
.globl _Z36__device_stub__random_direction_initP12hiprandStateP22random_waypoint_configP21random_direction_nodeP4gridj # -- Begin function _Z36__device_stub__random_direction_initP12hiprandStateP22random_waypoint_configP21random_direction_nodeP4gridj
.p2align 4, 0x90
.type _Z36__device_stub__random_direction_initP12hiprandStateP22random_waypoint_configP21random_direction_nodeP4gridj,@function
_Z36__device_stub__random_direction_initP12hiprandStateP22random_waypoint_configP21random_direction_nodeP4gridj: # @_Z36__device_stub__random_direction_initP12hiprandStateP22random_waypoint_configP21random_direction_nodeP4gridj
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z21random_direction_initP12hiprandStateP22random_waypoint_configP21random_direction_nodeP4gridj, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $152, %rsp
.cfi_adjust_cfa_offset -152
retq
.Lfunc_end4:
.size _Z36__device_stub__random_direction_initP12hiprandStateP22random_waypoint_configP21random_direction_nodeP4gridj, .Lfunc_end4-_Z36__device_stub__random_direction_initP12hiprandStateP22random_waypoint_configP21random_direction_nodeP4gridj
.cfi_endproc
# -- End function
.globl _Z36__device_stub__random_direction_stepP12hiprandStateP22random_waypoint_configP21random_direction_nodeP4gridj # -- Begin function _Z36__device_stub__random_direction_stepP12hiprandStateP22random_waypoint_configP21random_direction_nodeP4gridj
.p2align 4, 0x90
.type _Z36__device_stub__random_direction_stepP12hiprandStateP22random_waypoint_configP21random_direction_nodeP4gridj,@function
_Z36__device_stub__random_direction_stepP12hiprandStateP22random_waypoint_configP21random_direction_nodeP4gridj: # @_Z36__device_stub__random_direction_stepP12hiprandStateP22random_waypoint_configP21random_direction_nodeP4gridj
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z21random_direction_stepP12hiprandStateP22random_waypoint_configP21random_direction_nodeP4gridj, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $152, %rsp
.cfi_adjust_cfa_offset -152
retq
.Lfunc_end5:
.size _Z36__device_stub__random_direction_stepP12hiprandStateP22random_waypoint_configP21random_direction_nodeP4gridj, .Lfunc_end5-_Z36__device_stub__random_direction_stepP12hiprandStateP22random_waypoint_configP21random_direction_nodeP4gridj
.cfi_endproc
# -- End function
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0 # -- Begin function main
.LCPI6_0:
.long 3240099840 # 0xc1200000
.long 1092616192 # 0x41200000
.long 3240099840 # 0xc1200000
.long 1092616192 # 0x41200000
.LCPI6_1:
.long 1060320051 # 0x3f333333
.long 1060320051 # 0x3f333333
.long 29 # 0x1d
.long 29 # 0x1d
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $200, %rsp
.cfi_def_cfa_offset 256
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movabsq $4294967297, %r14 # imm = 0x100000001
movaps .LCPI6_0(%rip), %xmm0 # xmm0 = [3240099840,1092616192,3240099840,1092616192]
movups %xmm0, 168(%rsp)
movl $1056964608, 184(%rsp) # imm = 0x3F000000
movl $2, 196(%rsp)
movabsq $4453159313411921674, %rax # imm = 0x3DCCCCCD3C23D70A
movq %rax, 188(%rsp)
movl $.L.str.3, %edi
movl $32, %esi
xorl %eax, %eax
callq printf
movl $1024, %edi # imm = 0x400
callq malloc
movq %rax, %rbx
leaq 48(%rsp), %rdi
movl $1024, %esi # imm = 0x400
callq hipMalloc
leaq 88(%rsp), %rdi
movl $32, %esi
callq hipMalloc
leaq 136(%rsp), %rdi
movl $1536, %esi # imm = 0x600
callq hipMalloc
movq 88(%rsp), %rdi
leaq 168(%rsp), %rsi
movl $32, %edx
movl $1, %ecx
callq hipMemcpy
movss 168(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss %xmm0, 40(%rsp) # 4-byte Spill
movss 176(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss %xmm0, 164(%rsp) # 4-byte Spill
leaq 8(%rsp), %rdi
movl $32, %esi
callq hipMalloc
movss 40(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
movss %xmm0, 96(%rsp)
movss 164(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
movss %xmm0, 100(%rsp)
movaps .LCPI6_1(%rip), %xmm0 # xmm0 = [1060320051,1060320051,29,29]
movups %xmm0, 104(%rsp)
leaq 24(%rsp), %rdi
movl $6728, %esi # imm = 0x1A48
callq hipMalloc
movq 24(%rsp), %rsi
movq %rsi, 120(%rsp)
movl $.L.str, %edi
xorl %eax, %eax
callq printf
movss 108(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.1, %edi
movb $1, %al
callq printf
movss 104(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.2, %edi
movb $1, %al
callq printf
movq 8(%rsp), %rdi
leaq 96(%rsp), %rsi
movl $32, %edx
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rsi
movl $.L.str.4, %edi
movq %rsi, 40(%rsp) # 8-byte Spill
xorl %eax, %eax
callq printf
movl $.Lstr, %edi
callq puts@PLT
movl $.Lstr.1, %edi
callq puts@PLT
leaq 31(%r14), %r12
movq %r14, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB6_2
# %bb.1:
movq 40(%rsp), %rax # 8-byte Reload
movq %rax, 80(%rsp)
movl $32, 56(%rsp)
leaq 80(%rsp), %rax
movq %rax, 96(%rsp)
leaq 56(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 72(%rsp), %rdx
leaq 64(%rsp), %rcx
callq __hipPopCallConfiguration
movq 8(%rsp), %rsi
movl 16(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z9grid_initP4gridj, %edi
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
pushq 80(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB6_2:
callq hipGetLastError
testl %eax, %eax
je .LBB6_4
# %bb.3:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.7, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
.LBB6_4:
movl $.Lstr.2, %edi
callq puts@PLT
movabsq $4294967297, %rdi # imm = 0x100000001
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB6_6
# %bb.5:
movq 136(%rsp), %rax
movq 88(%rsp), %rcx
movq 48(%rsp), %rdx
movq %rax, 80(%rsp)
movq %rcx, 72(%rsp)
movq %rdx, 64(%rsp)
movq 40(%rsp), %rax # 8-byte Reload
movq %rax, 56(%rsp)
movl $32, 20(%rsp)
leaq 80(%rsp), %rax
movq %rax, 96(%rsp)
leaq 72(%rsp), %rax
movq %rax, 104(%rsp)
leaq 64(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rax
movq %rax, 120(%rsp)
leaq 20(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 152(%rsp), %rdx
leaq 144(%rsp), %rcx
callq __hipPopCallConfiguration
movq 8(%rsp), %rsi
movl 16(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z20random_waypoint_initP12hiprandStateP22random_waypoint_configP20random_waypoint_nodeP4gridj, %edi
pushq 144(%rsp)
.cfi_adjust_cfa_offset 8
pushq 160(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB6_6:
callq hipGetLastError
testl %eax, %eax
je .LBB6_8
# %bb.7:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.7, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
.LBB6_8:
movl $.Lstr.3, %edi
callq puts@PLT
movq %rbx, %r15
addq $20, %r15
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB6_9: # =>This Loop Header: Depth=1
# Child Loop BB6_15 Depth 2
movl $.L.str.10, %edi
movl %r13d, %esi
xorl %eax, %eax
callq printf
movabsq $4294967297, %rdi # imm = 0x100000001
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB6_11
# %bb.10: # in Loop: Header=BB6_9 Depth=1
movq 136(%rsp), %rax
movq 88(%rsp), %rcx
movq 48(%rsp), %rdx
movq %rax, 80(%rsp)
movq %rcx, 72(%rsp)
movq %rdx, 64(%rsp)
movq 40(%rsp), %rax # 8-byte Reload
movq %rax, 56(%rsp)
movl $32, 20(%rsp)
leaq 80(%rsp), %rax
movq %rax, 96(%rsp)
leaq 72(%rsp), %rax
movq %rax, 104(%rsp)
leaq 64(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rax
movq %rax, 120(%rsp)
leaq 20(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 152(%rsp), %rdx
leaq 144(%rsp), %rcx
callq __hipPopCallConfiguration
movq 8(%rsp), %rsi
movl 16(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
movl $_Z20random_waypoint_stepP12hiprandStateP22random_waypoint_configP20random_waypoint_nodeP4gridj, %edi
leaq 96(%rsp), %r9
pushq 144(%rsp)
.cfi_adjust_cfa_offset 8
pushq 160(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB6_11: # in Loop: Header=BB6_9 Depth=1
callq hipGetLastError
testl %eax, %eax
je .LBB6_13
# %bb.12: # in Loop: Header=BB6_9 Depth=1
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.7, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
.LBB6_13: # in Loop: Header=BB6_9 Depth=1
movq 48(%rsp), %rsi
movl $.L.str.11, %edi
movq %rbx, %rdx
xorl %eax, %eax
callq printf
movq 48(%rsp), %rsi
movl $1024, %edx # imm = 0x400
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
callq hipGetLastError
testl %eax, %eax
je .LBB6_14
# %bb.18: # in Loop: Header=BB6_9 Depth=1
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.7, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
.LBB6_14: # %.preheader
# in Loop: Header=BB6_9 Depth=1
movq %r15, %r14
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB6_15: # Parent Loop BB6_9 Depth=1
# => This Inner Loop Header: Depth=2
movss -20(%r14), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss -16(%r14), %xmm1 # xmm1 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
cvtss2sd %xmm1, %xmm1
movss -4(%r14), %xmm2 # xmm2 = mem[0],zero,zero,zero
cvtss2sd %xmm2, %xmm2
movss -12(%r14), %xmm3 # xmm3 = mem[0],zero,zero,zero
cvtss2sd %xmm3, %xmm3
movss -8(%r14), %xmm4 # xmm4 = mem[0],zero,zero,zero
cvtss2sd %xmm4, %xmm4
movl (%r14), %edx
movl $.L.str.12, %edi
movl %ebp, %esi
movb $5, %al
callq printf
incq %rbp
addq $32, %r14
cmpq $32, %rbp
jne .LBB6_15
# %bb.16: # in Loop: Header=BB6_9 Depth=1
incl %r13d
cmpl $10, %r13d
jne .LBB6_9
# %bb.17:
movq 136(%rsp), %rdi
callq hipFree
movq 88(%rsp), %rdi
callq hipFree
movq 48(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
xorl %edi, %edi
callq exit
.Lfunc_end6:
.size main, .Lfunc_end6-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB7_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB7_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9grid_initP4gridj, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z20random_waypoint_initP12hiprandStateP22random_waypoint_configP20random_waypoint_nodeP4gridj, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z20random_waypoint_stepP12hiprandStateP22random_waypoint_configP20random_waypoint_nodeP4gridj, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z21random_direction_initP12hiprandStateP22random_waypoint_configP21random_direction_nodeP4gridj, %esi
movl $.L__unnamed_4, %edx
movl $.L__unnamed_4, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z21random_direction_stepP12hiprandStateP22random_waypoint_configP21random_direction_nodeP4gridj, %esi
movl $.L__unnamed_5, %edx
movl $.L__unnamed_5, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end7:
.size __hip_module_ctor, .Lfunc_end7-__hip_module_ctor
.cfi_endproc
# -- End function
        # Compiler-generated module destructor (registered with atexit by
        # __hip_module_ctor): unregisters the fat binary once, guarded by a
        # NULL check on the cached handle, then clears the handle.
        .p2align        4, 0x90                         # -- Begin function __hip_module_dtor
        .type   __hip_module_dtor,@function
__hip_module_dtor:                      # @__hip_module_dtor
        .cfi_startproc
# %bb.0:
        movq    __hip_gpubin_handle(%rip), %rdi
        testq   %rdi, %rdi
        je      .LBB8_2
# %bb.1:
        pushq   %rax
        .cfi_def_cfa_offset 16
        callq   __hipUnregisterFatBinary
        movq    $0, __hip_gpubin_handle(%rip)
        addq    $8, %rsp
        .cfi_def_cfa_offset 8
.LBB8_2:
        retq
.Lfunc_end8:
        .size   __hip_module_dtor, .Lfunc_end8-__hip_module_dtor
        .cfi_endproc
                                        # -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "h_grid.cells %x\n"
.size .L.str, 17
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "h_grid.cell_height %f\n"
.size .L.str.1, 23
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "h_grid.cell_width %f\n"
.size .L.str.2, 22
.type _Z9grid_initP4gridj,@object # @_Z9grid_initP4gridj
.section .rodata,"a",@progbits
.globl _Z9grid_initP4gridj
.p2align 3, 0x0
_Z9grid_initP4gridj:
.quad _Z24__device_stub__grid_initP4gridj
.size _Z9grid_initP4gridj, 8
.type _Z20random_waypoint_initP12hiprandStateP22random_waypoint_configP20random_waypoint_nodeP4gridj,@object # @_Z20random_waypoint_initP12hiprandStateP22random_waypoint_configP20random_waypoint_nodeP4gridj
.globl _Z20random_waypoint_initP12hiprandStateP22random_waypoint_configP20random_waypoint_nodeP4gridj
.p2align 3, 0x0
_Z20random_waypoint_initP12hiprandStateP22random_waypoint_configP20random_waypoint_nodeP4gridj:
.quad _Z35__device_stub__random_waypoint_initP12hiprandStateP22random_waypoint_configP20random_waypoint_nodeP4gridj
.size _Z20random_waypoint_initP12hiprandStateP22random_waypoint_configP20random_waypoint_nodeP4gridj, 8
.type _Z20random_waypoint_stepP12hiprandStateP22random_waypoint_configP20random_waypoint_nodeP4gridj,@object # @_Z20random_waypoint_stepP12hiprandStateP22random_waypoint_configP20random_waypoint_nodeP4gridj
.globl _Z20random_waypoint_stepP12hiprandStateP22random_waypoint_configP20random_waypoint_nodeP4gridj
.p2align 3, 0x0
_Z20random_waypoint_stepP12hiprandStateP22random_waypoint_configP20random_waypoint_nodeP4gridj:
.quad _Z35__device_stub__random_waypoint_stepP12hiprandStateP22random_waypoint_configP20random_waypoint_nodeP4gridj
.size _Z20random_waypoint_stepP12hiprandStateP22random_waypoint_configP20random_waypoint_nodeP4gridj, 8
.type _Z21random_direction_initP12hiprandStateP22random_waypoint_configP21random_direction_nodeP4gridj,@object # @_Z21random_direction_initP12hiprandStateP22random_waypoint_configP21random_direction_nodeP4gridj
.globl _Z21random_direction_initP12hiprandStateP22random_waypoint_configP21random_direction_nodeP4gridj
.p2align 3, 0x0
_Z21random_direction_initP12hiprandStateP22random_waypoint_configP21random_direction_nodeP4gridj:
.quad _Z36__device_stub__random_direction_initP12hiprandStateP22random_waypoint_configP21random_direction_nodeP4gridj
.size _Z21random_direction_initP12hiprandStateP22random_waypoint_configP21random_direction_nodeP4gridj, 8
.type _Z21random_direction_stepP12hiprandStateP22random_waypoint_configP21random_direction_nodeP4gridj,@object # @_Z21random_direction_stepP12hiprandStateP22random_waypoint_configP21random_direction_nodeP4gridj
.globl _Z21random_direction_stepP12hiprandStateP22random_waypoint_configP21random_direction_nodeP4gridj
.p2align 3, 0x0
_Z21random_direction_stepP12hiprandStateP22random_waypoint_configP21random_direction_nodeP4gridj:
.quad _Z36__device_stub__random_direction_stepP12hiprandStateP22random_waypoint_configP21random_direction_nodeP4gridj
.size _Z21random_direction_stepP12hiprandStateP22random_waypoint_configP21random_direction_nodeP4gridj, 8
.type .L.str.3,@object # @.str.3
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.3:
.asciz "%d nodes\n"
.size .L.str.3, 10
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "d_grid %x\n"
.size .L.str.4, 11
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "ERR: %s\n"
.size .L.str.7, 9
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "\nframe %u\n"
.size .L.str.10, 11
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "memcpy %x (device) to %x (host)\n"
.size .L.str.11, 33
.type .L.str.12,@object # @.str.12
.L.str.12:
.asciz "%u: %f,%f (v = %f) -> %f, %f | p = %d\n"
.size .L.str.12, 39
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z9grid_initP4gridj"
.size .L__unnamed_1, 20
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z20random_waypoint_initP12hiprandStateP22random_waypoint_configP20random_waypoint_nodeP4gridj"
.size .L__unnamed_2, 95
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z20random_waypoint_stepP12hiprandStateP22random_waypoint_configP20random_waypoint_nodeP4gridj"
.size .L__unnamed_3, 95
.type .L__unnamed_4,@object # @3
.L__unnamed_4:
.asciz "_Z21random_direction_initP12hiprandStateP22random_waypoint_configP21random_direction_nodeP4gridj"
.size .L__unnamed_4, 97
.type .L__unnamed_5,@object # @4
.L__unnamed_5:
.asciz "_Z21random_direction_stepP12hiprandStateP22random_waypoint_configP21random_direction_nodeP4gridj"
.size .L__unnamed_5, 97
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "init"
.size .Lstr, 5
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "grid_init"
.size .Lstr.1, 10
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "random_waypoint_init"
.size .Lstr.2, 21
.type .Lstr.3,@object # @str.3
.Lstr.3:
.asciz "/init"
.size .Lstr.3, 6
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__grid_initP4gridj
.addrsig_sym _Z35__device_stub__random_waypoint_initP12hiprandStateP22random_waypoint_configP20random_waypoint_nodeP4gridj
.addrsig_sym _Z35__device_stub__random_waypoint_stepP12hiprandStateP22random_waypoint_configP20random_waypoint_nodeP4gridj
.addrsig_sym _Z36__device_stub__random_direction_initP12hiprandStateP22random_waypoint_configP21random_direction_nodeP4gridj
.addrsig_sym _Z36__device_stub__random_direction_stepP12hiprandStateP22random_waypoint_configP21random_direction_nodeP4gridj
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9grid_initP4gridj
.addrsig_sym _Z20random_waypoint_initP12hiprandStateP22random_waypoint_configP20random_waypoint_nodeP4gridj
.addrsig_sym _Z20random_waypoint_stepP12hiprandStateP22random_waypoint_configP20random_waypoint_nodeP4gridj
.addrsig_sym _Z21random_direction_initP12hiprandStateP22random_waypoint_configP21random_direction_nodeP4gridj
.addrsig_sym _Z21random_direction_stepP12hiprandStateP22random_waypoint_configP21random_direction_nodeP4gridj
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <sys/time.h>
__global__ void vecMultiply(int *arr, int size){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid<size){
for(int i = 0;i<100000;i++){
*(arr + tid) += 10;
}
}
}
int main(int argc, char *argv[]){
// Initialize
int elementSize = 64;
int threadsPerBlock = 32;
int blockSize = (elementSize+threadsPerBlock-1)/threadsPerBlock;
int *host_input_arr;
cudaMallocManaged((void**)&host_input_arr, sizeof(int) * elementSize);
for(int i = 0;i<elementSize;i++){
host_input_arr[i] = i;
}
vecMultiply<<<blockSize, threadsPerBlock>>>(host_input_arr, elementSize);
cudaDeviceSynchronize();
for(int i = 0;i<elementSize;i++){
printf("%d ", host_input_arr[i]);
}
printf("\n");
cudaFree(host_input_arr);
return 0;
} | code for sm_80
Function : _Z11vecMultiplyPii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x168], PT ; /* 0x00005a0002007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fca00078e0203 */
/*0090*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea4000c1e1900 */
/*00a0*/ IADD3 R5, R0, 0xf4240, RZ ; /* 0x000f424000057810 */
/* 0x004fca0007ffe0ff */
/*00b0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*00c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00d0*/ BRA 0xd0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <sys/time.h>
__global__ void vecMultiply(int *arr, int size){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid<size){
for(int i = 0;i<100000;i++){
*(arr + tid) += 10;
}
}
}
int main(int argc, char *argv[]){
// Initialize
int elementSize = 64;
int threadsPerBlock = 32;
int blockSize = (elementSize+threadsPerBlock-1)/threadsPerBlock;
int *host_input_arr;
cudaMallocManaged((void**)&host_input_arr, sizeof(int) * elementSize);
for(int i = 0;i<elementSize;i++){
host_input_arr[i] = i;
}
vecMultiply<<<blockSize, threadsPerBlock>>>(host_input_arr, elementSize);
cudaDeviceSynchronize();
for(int i = 0;i<elementSize;i++){
printf("%d ", host_input_arr[i]);
}
printf("\n");
cudaFree(host_input_arr);
return 0;
} | .file "tmpxft_00080766_00000000-6_UnifiedMemoryAccess.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z32__device_stub__Z11vecMultiplyPiiPii
.type _Z32__device_stub__Z11vecMultiplyPiiPii, @function
_Z32__device_stub__Z11vecMultiplyPiiPii:
.LFB2082:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z11vecMultiplyPii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z32__device_stub__Z11vecMultiplyPiiPii, .-_Z32__device_stub__Z11vecMultiplyPiiPii
.globl _Z11vecMultiplyPii
.type _Z11vecMultiplyPii, @function
_Z11vecMultiplyPii:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z11vecMultiplyPiiPii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z11vecMultiplyPii, .-_Z11vecMultiplyPii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%d "
.LC1:
.string "\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $56, %rsp
.cfi_def_cfa_offset 80
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rdi
movl $1, %edx
movl $256, %esi
call cudaMallocManaged@PLT
movl $0, %eax
.L12:
movq 8(%rsp), %rdx
movl %eax, (%rdx,%rax,4)
addq $1, %rax
cmpq $64, %rax
jne .L12
movl $32, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $2, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L19
.L13:
call cudaDeviceSynchronize@PLT
movl $0, %ebx
leaq .LC0(%rip), %rbp
.L14:
movq 8(%rsp), %rax
movl (%rax,%rbx), %edx
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq $256, %rbx
jne .L14
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L20
movl $0, %eax
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
movl $64, %esi
movq 8(%rsp), %rdi
call _Z32__device_stub__Z11vecMultiplyPiiPii
jmp .L13
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC2:
.string "_Z11vecMultiplyPii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z11vecMultiplyPii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <sys/time.h>
__global__ void vecMultiply(int *arr, int size){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid<size){
for(int i = 0;i<100000;i++){
*(arr + tid) += 10;
}
}
}
int main(int argc, char *argv[]){
// Initialize
int elementSize = 64;
int threadsPerBlock = 32;
int blockSize = (elementSize+threadsPerBlock-1)/threadsPerBlock;
int *host_input_arr;
cudaMallocManaged((void**)&host_input_arr, sizeof(int) * elementSize);
for(int i = 0;i<elementSize;i++){
host_input_arr[i] = i;
}
vecMultiply<<<blockSize, threadsPerBlock>>>(host_input_arr, elementSize);
cudaDeviceSynchronize();
for(int i = 0;i<elementSize;i++){
printf("%d ", host_input_arr[i]);
}
printf("\n");
cudaFree(host_input_arr);
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <sys/time.h>
__global__ void vecMultiply(int *arr, int size){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid<size){
for(int i = 0;i<100000;i++){
*(arr + tid) += 10;
}
}
}
int main(int argc, char *argv[]){
// Initialize
int elementSize = 64;
int threadsPerBlock = 32;
int blockSize = (elementSize+threadsPerBlock-1)/threadsPerBlock;
int *host_input_arr;
hipMallocManaged((void**)&host_input_arr, sizeof(int) * elementSize);
for(int i = 0;i<elementSize;i++){
host_input_arr[i] = i;
}
vecMultiply<<<blockSize, threadsPerBlock>>>(host_input_arr, elementSize);
hipDeviceSynchronize();
for(int i = 0;i<elementSize;i++){
printf("%d ", host_input_arr[i]);
}
printf("\n");
hipFree(host_input_arr);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <sys/time.h>
__global__ void vecMultiply(int *arr, int size){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid<size){
for(int i = 0;i<100000;i++){
*(arr + tid) += 10;
}
}
}
int main(int argc, char *argv[]){
// Initialize
int elementSize = 64;
int threadsPerBlock = 32;
int blockSize = (elementSize+threadsPerBlock-1)/threadsPerBlock;
int *host_input_arr;
hipMallocManaged((void**)&host_input_arr, sizeof(int) * elementSize);
for(int i = 0;i<elementSize;i++){
host_input_arr[i] = i;
}
vecMultiply<<<blockSize, threadsPerBlock>>>(host_input_arr, elementSize);
hipDeviceSynchronize();
for(int i = 0;i<elementSize;i++){
printf("%d ", host_input_arr[i]);
}
printf("\n");
hipFree(host_input_arr);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z11vecMultiplyPii
.globl _Z11vecMultiplyPii
.p2align 8
.type _Z11vecMultiplyPii,@function
_Z11vecMultiplyPii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x1c
s_load_b32 s3, s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b64 s[0:1], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_load_b32 v2, v[0:1], off
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, 0xf4240, v2
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z11vecMultiplyPii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z11vecMultiplyPii, .Lfunc_end0-_Z11vecMultiplyPii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z11vecMultiplyPii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z11vecMultiplyPii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <sys/time.h>
__global__ void vecMultiply(int *arr, int size){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid<size){
for(int i = 0;i<100000;i++){
*(arr + tid) += 10;
}
}
}
int main(int argc, char *argv[]){
// Initialize
int elementSize = 64;
int threadsPerBlock = 32;
int blockSize = (elementSize+threadsPerBlock-1)/threadsPerBlock;
int *host_input_arr;
hipMallocManaged((void**)&host_input_arr, sizeof(int) * elementSize);
for(int i = 0;i<elementSize;i++){
host_input_arr[i] = i;
}
vecMultiply<<<blockSize, threadsPerBlock>>>(host_input_arr, elementSize);
hipDeviceSynchronize();
for(int i = 0;i<elementSize;i++){
printf("%d ", host_input_arr[i]);
}
printf("\n");
hipFree(host_input_arr);
return 0;
} | .text
.file "UnifiedMemoryAccess.hip"
.globl _Z26__device_stub__vecMultiplyPii # -- Begin function _Z26__device_stub__vecMultiplyPii
.p2align 4, 0x90
.type _Z26__device_stub__vecMultiplyPii,@function
_Z26__device_stub__vecMultiplyPii: # @_Z26__device_stub__vecMultiplyPii
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z11vecMultiplyPii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z26__device_stub__vecMultiplyPii, .Lfunc_end0-_Z26__device_stub__vecMultiplyPii
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $96, %rsp
.cfi_def_cfa_offset 112
.cfi_offset %rbx, -16
leaq 8(%rsp), %rdi
movl $256, %esi # imm = 0x100
movl $1, %edx
callq hipMallocManaged
xorl %eax, %eax
movq 8(%rsp), %rcx
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movl %eax, (%rcx,%rax,4)
incq %rax
cmpq $64, %rax
jne .LBB1_1
# %bb.2:
movabsq $4294967298, %rdi # imm = 0x100000002
leaq 30(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 8(%rsp), %rax
movq %rax, 72(%rsp)
movl $64, 20(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 20(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z11vecMultiplyPii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
callq hipDeviceSynchronize
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB1_5: # =>This Inner Loop Header: Depth=1
movq 8(%rsp), %rax
movl (%rax,%rbx,4), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
incq %rbx
cmpq $64, %rbx
jne .LBB1_5
# %bb.6:
movl $10, %edi
callq putchar@PLT
movq 8(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $96, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11vecMultiplyPii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z11vecMultiplyPii,@object # @_Z11vecMultiplyPii
.section .rodata,"a",@progbits
.globl _Z11vecMultiplyPii
.p2align 3, 0x0
_Z11vecMultiplyPii:
.quad _Z26__device_stub__vecMultiplyPii
.size _Z11vecMultiplyPii, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%d "
.size .L.str, 4
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z11vecMultiplyPii"
.size .L__unnamed_1, 19
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z26__device_stub__vecMultiplyPii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z11vecMultiplyPii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z11vecMultiplyPii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x168], PT ; /* 0x00005a0002007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fca00078e0203 */
/*0090*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea4000c1e1900 */
/*00a0*/ IADD3 R5, R0, 0xf4240, RZ ; /* 0x000f424000057810 */
/* 0x004fca0007ffe0ff */
/*00b0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*00c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00d0*/ BRA 0xd0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z11vecMultiplyPii
.globl _Z11vecMultiplyPii
.p2align 8
.type _Z11vecMultiplyPii,@function
_Z11vecMultiplyPii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x1c
s_load_b32 s3, s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b64 s[0:1], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_load_b32 v2, v[0:1], off
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, 0xf4240, v2
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z11vecMultiplyPii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z11vecMultiplyPii, .Lfunc_end0-_Z11vecMultiplyPii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z11vecMultiplyPii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z11vecMultiplyPii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00080766_00000000-6_UnifiedMemoryAccess.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z32__device_stub__Z11vecMultiplyPiiPii
.type _Z32__device_stub__Z11vecMultiplyPiiPii, @function
_Z32__device_stub__Z11vecMultiplyPiiPii:
.LFB2082:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z11vecMultiplyPii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z32__device_stub__Z11vecMultiplyPiiPii, .-_Z32__device_stub__Z11vecMultiplyPiiPii
.globl _Z11vecMultiplyPii
.type _Z11vecMultiplyPii, @function
_Z11vecMultiplyPii:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z11vecMultiplyPiiPii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z11vecMultiplyPii, .-_Z11vecMultiplyPii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%d "
.LC1:
.string "\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $56, %rsp
.cfi_def_cfa_offset 80
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rdi
movl $1, %edx
movl $256, %esi
call cudaMallocManaged@PLT
movl $0, %eax
.L12:
movq 8(%rsp), %rdx
movl %eax, (%rdx,%rax,4)
addq $1, %rax
cmpq $64, %rax
jne .L12
movl $32, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $2, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L19
.L13:
call cudaDeviceSynchronize@PLT
movl $0, %ebx
leaq .LC0(%rip), %rbp
.L14:
movq 8(%rsp), %rax
movl (%rax,%rbx), %edx
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq $256, %rbx
jne .L14
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L20
movl $0, %eax
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
movl $64, %esi
movq 8(%rsp), %rdi
call _Z32__device_stub__Z11vecMultiplyPiiPii
jmp .L13
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC2:
.string "_Z11vecMultiplyPii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z11vecMultiplyPii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "UnifiedMemoryAccess.hip"
.globl _Z26__device_stub__vecMultiplyPii # -- Begin function _Z26__device_stub__vecMultiplyPii
.p2align 4, 0x90
.type _Z26__device_stub__vecMultiplyPii,@function
_Z26__device_stub__vecMultiplyPii: # @_Z26__device_stub__vecMultiplyPii
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z11vecMultiplyPii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z26__device_stub__vecMultiplyPii, .Lfunc_end0-_Z26__device_stub__vecMultiplyPii
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $96, %rsp
.cfi_def_cfa_offset 112
.cfi_offset %rbx, -16
leaq 8(%rsp), %rdi
movl $256, %esi # imm = 0x100
movl $1, %edx
callq hipMallocManaged
xorl %eax, %eax
movq 8(%rsp), %rcx
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movl %eax, (%rcx,%rax,4)
incq %rax
cmpq $64, %rax
jne .LBB1_1
# %bb.2:
movabsq $4294967298, %rdi # imm = 0x100000002
leaq 30(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 8(%rsp), %rax
movq %rax, 72(%rsp)
movl $64, 20(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 20(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z11vecMultiplyPii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
callq hipDeviceSynchronize
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB1_5: # =>This Inner Loop Header: Depth=1
movq 8(%rsp), %rax
movl (%rax,%rbx,4), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
incq %rbx
cmpq $64, %rbx
jne .LBB1_5
# %bb.6:
movl $10, %edi
callq putchar@PLT
movq 8(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $96, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11vecMultiplyPii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z11vecMultiplyPii,@object # @_Z11vecMultiplyPii
.section .rodata,"a",@progbits
.globl _Z11vecMultiplyPii
.p2align 3, 0x0
_Z11vecMultiplyPii:
.quad _Z26__device_stub__vecMultiplyPii
.size _Z11vecMultiplyPii, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%d "
.size .L.str, 4
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z11vecMultiplyPii"
.size .L__unnamed_1, 19
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z26__device_stub__vecMultiplyPii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z11vecMultiplyPii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | __global__ void reduce_add_kernel(const float *d_in, float *d_out, int input_size) {
extern __shared__ float sdata[];
int index = threadIdx.x + blockDim.x * blockIdx.x;
int myId = threadIdx.x;
// Put whole block in shared memory
sdata[myId] = (index < input_size) ? d_in[index] : 0;
__syncthreads();
for (int i = blockDim.x / 2; i > 0; i>>=1) {
if (myId < i) {
sdata[myId] = sdata[myId] + sdata[myId+i];
}
__syncthreads();
}
if (myId == 0) {
d_out[blockIdx.x] = sdata[0];
}
}
int next_power_of_two(int number) {
int result = 1;
while(result < number) {
result <<= 1;
}
return result;
}
void primitive_reduce_add(float *d_input, float *d_result, int input_size) {
const int SIZE_AS_POT = next_power_of_two(input_size);
int shared_size = sizeof(float) * SIZE_AS_POT;
reduce_add_kernel<<<1, SIZE_AS_POT, shared_size>>>(d_input, d_result, input_size);
} | code for sm_80
Function : _Z17reduce_add_kernelPKfPfi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R0, RZ, RZ, RZ ; /* 0x000000ffff007224 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R2, R6, c[0x0][0x0], R7 ; /* 0x0000000006027a24 */
/* 0x001fca00078e0207 */
/*0060*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x170], PT ; /* 0x00005c0002007a0c */
/* 0x000fda0003f06270 */
/*0070*/ @!P0 MOV R3, 0x4 ; /* 0x0000000400038802 */
/* 0x000fca0000000f00 */
/*0080*/ @!P0 IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002028625 */
/* 0x000fca00078e0203 */
/*0090*/ @!P0 LDG.E R0, [R2.64] ; /* 0x0000000602008981 */
/* 0x000ea2000c1e1900 */
/*00a0*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */
/* 0x000fe20000000800 */
/*00b0*/ ISETP.NE.AND P0, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x000fe20003f05270 */
/*00c0*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */
/* 0x000fcc0008011604 */
/*00d0*/ ISETP.NE.AND P1, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */
/* 0x000fe2000bf25270 */
/*00e0*/ STS [R7.X4], R0 ; /* 0x0000000007007388 */
/* 0x0041e80000004800 */
/*00f0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000ff00000010000 */
/*0100*/ @!P1 BRA 0x1d0 ; /* 0x000000c000009947 */
/* 0x000fea0003800000 */
/*0110*/ SHF.L.U32 R0, R7, 0x2, RZ ; /* 0x0000000207007819 */
/* 0x001fe200000006ff */
/*0120*/ IMAD.U32 R3, RZ, RZ, UR4 ; /* 0x00000004ff037e24 */
/* 0x000fca000f8e00ff */
/*0130*/ ISETP.GE.AND P1, PT, R7, R3, PT ; /* 0x000000030700720c */
/* 0x000fda0003f26270 */
/*0140*/ @!P1 LEA R4, R3, R0, 0x2 ; /* 0x0000000003049211 */
/* 0x000fe200078e10ff */
/*0150*/ @!P1 LDS R2, [R7.X4] ; /* 0x0000000007029984 */
/* 0x000fe20000004800 */
/*0160*/ SHF.R.U32.HI R3, RZ, 0x1, R3 ; /* 0x00000001ff037819 */
/* 0x000fc60000011603 */
/*0170*/ @!P1 LDS R5, [R4] ; /* 0x0000000004059984 */
/* 0x000e240000000800 */
/*0180*/ @!P1 FADD R2, R2, R5 ; /* 0x0000000502029221 */
/* 0x001fca0000000000 */
/*0190*/ @!P1 STS [R7.X4], R2 ; /* 0x0000000207009388 */
/* 0x0001e80000004800 */
/*01a0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*01b0*/ ISETP.NE.AND P1, PT, R3, RZ, PT ; /* 0x000000ff0300720c */
/* 0x000fda0003f25270 */
/*01c0*/ @P1 BRA 0x130 ; /* 0xffffff6000001947 */
/* 0x001fea000383ffff */
/*01d0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x001fea0003800000 */
/*01e0*/ LDS R5, [RZ] ; /* 0x00000000ff057984 */
/* 0x000e220000000800 */
/*01f0*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fc800078e00ff */
/*0200*/ IMAD.WIDE.U32 R2, R6, R3, c[0x0][0x168] ; /* 0x00005a0006027625 */
/* 0x000fca00078e0003 */
/*0210*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101906 */
/*0220*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0230*/ BRA 0x230; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | __global__ void reduce_add_kernel(const float *d_in, float *d_out, int input_size) {
extern __shared__ float sdata[];
int index = threadIdx.x + blockDim.x * blockIdx.x;
int myId = threadIdx.x;
// Put whole block in shared memory
sdata[myId] = (index < input_size) ? d_in[index] : 0;
__syncthreads();
for (int i = blockDim.x / 2; i > 0; i>>=1) {
if (myId < i) {
sdata[myId] = sdata[myId] + sdata[myId+i];
}
__syncthreads();
}
if (myId == 0) {
d_out[blockIdx.x] = sdata[0];
}
}
int next_power_of_two(int number) {
int result = 1;
while(result < number) {
result <<= 1;
}
return result;
}
void primitive_reduce_add(float *d_input, float *d_result, int input_size) {
const int SIZE_AS_POT = next_power_of_two(input_size);
int shared_size = sizeof(float) * SIZE_AS_POT;
reduce_add_kernel<<<1, SIZE_AS_POT, shared_size>>>(d_input, d_result, input_size);
} | .file "tmpxft_0009d2de_00000000-6_reduce.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2031:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2031:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z17next_power_of_twoi
.type _Z17next_power_of_twoi, @function
_Z17next_power_of_twoi:
.LFB2027:
.cfi_startproc
endbr64
cmpl $1, %edi
jle .L6
movl $1, %eax
.L5:
addl %eax, %eax
cmpl %eax, %edi
jg .L5
ret
.L6:
movl $1, %eax
ret
.cfi_endproc
.LFE2027:
.size _Z17next_power_of_twoi, .-_Z17next_power_of_twoi
.globl _Z41__device_stub__Z17reduce_add_kernelPKfPfiPKfPfi
.type _Z41__device_stub__Z17reduce_add_kernelPKfPfiPKfPfi, @function
_Z41__device_stub__Z17reduce_add_kernelPKfPfiPKfPfi:
.LFB2053:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L12
.L8:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L13
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L12:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z17reduce_add_kernelPKfPfi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L8
.L13:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2053:
.size _Z41__device_stub__Z17reduce_add_kernelPKfPfiPKfPfi, .-_Z41__device_stub__Z17reduce_add_kernelPKfPfiPKfPfi
.globl _Z17reduce_add_kernelPKfPfi
.type _Z17reduce_add_kernelPKfPfi, @function
_Z17reduce_add_kernelPKfPfi:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z41__device_stub__Z17reduce_add_kernelPKfPfiPKfPfi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _Z17reduce_add_kernelPKfPfi, .-_Z17reduce_add_kernelPKfPfi
.globl _Z20primitive_reduce_addPfS_i
.type _Z20primitive_reduce_addPfS_i, @function
_Z20primitive_reduce_addPfS_i:
.LFB2028:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $32, %rsp
.cfi_def_cfa_offset 64
movq %rdi, %rbp
movq %rsi, %r12
movl %edx, %ebx
cmpl $1, %edx
jle .L20
movl $1, %r8d
.L18:
addl %r8d, %r8d
cmpl %r8d, %ebx
jg .L18
.L17:
movl %r8d, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 8(%rsp)
movl $1, 12(%rsp)
sall $2, %r8d
movl $0, %r9d
movslq %r8d, %r8
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L23
.L16:
addq $32, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L20:
.cfi_restore_state
movl $1, %r8d
jmp .L17
.L23:
movl %ebx, %edx
movq %r12, %rsi
movq %rbp, %rdi
call _Z41__device_stub__Z17reduce_add_kernelPKfPfiPKfPfi
jmp .L16
.cfi_endproc
.LFE2028:
.size _Z20primitive_reduce_addPfS_i, .-_Z20primitive_reduce_addPfS_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z17reduce_add_kernelPKfPfi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2056:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z17reduce_add_kernelPKfPfi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2056:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | __global__ void reduce_add_kernel(const float *d_in, float *d_out, int input_size) {
extern __shared__ float sdata[];
int index = threadIdx.x + blockDim.x * blockIdx.x;
int myId = threadIdx.x;
// Put whole block in shared memory
sdata[myId] = (index < input_size) ? d_in[index] : 0;
__syncthreads();
for (int i = blockDim.x / 2; i > 0; i>>=1) {
if (myId < i) {
sdata[myId] = sdata[myId] + sdata[myId+i];
}
__syncthreads();
}
if (myId == 0) {
d_out[blockIdx.x] = sdata[0];
}
}
int next_power_of_two(int number) {
int result = 1;
while(result < number) {
result <<= 1;
}
return result;
}
void primitive_reduce_add(float *d_input, float *d_result, int input_size) {
const int SIZE_AS_POT = next_power_of_two(input_size);
int shared_size = sizeof(float) * SIZE_AS_POT;
reduce_add_kernel<<<1, SIZE_AS_POT, shared_size>>>(d_input, d_result, input_size);
} | #include <hip/hip_runtime.h>
__global__ void reduce_add_kernel(const float *d_in, float *d_out, int input_size) {
extern __shared__ float sdata[];
int index = threadIdx.x + blockDim.x * blockIdx.x;
int myId = threadIdx.x;
// Put whole block in shared memory
sdata[myId] = (index < input_size) ? d_in[index] : 0;
__syncthreads();
for (int i = blockDim.x / 2; i > 0; i>>=1) {
if (myId < i) {
sdata[myId] = sdata[myId] + sdata[myId+i];
}
__syncthreads();
}
if (myId == 0) {
d_out[blockIdx.x] = sdata[0];
}
}
int next_power_of_two(int number) {
int result = 1;
while(result < number) {
result <<= 1;
}
return result;
}
void primitive_reduce_add(float *d_input, float *d_result, int input_size) {
const int SIZE_AS_POT = next_power_of_two(input_size);
int shared_size = sizeof(float) * SIZE_AS_POT;
reduce_add_kernel<<<1, SIZE_AS_POT, shared_size>>>(d_input, d_result, input_size);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
__global__ void reduce_add_kernel(const float *d_in, float *d_out, int input_size) {
extern __shared__ float sdata[];
int index = threadIdx.x + blockDim.x * blockIdx.x;
int myId = threadIdx.x;
// Put whole block in shared memory
sdata[myId] = (index < input_size) ? d_in[index] : 0;
__syncthreads();
for (int i = blockDim.x / 2; i > 0; i>>=1) {
if (myId < i) {
sdata[myId] = sdata[myId] + sdata[myId+i];
}
__syncthreads();
}
if (myId == 0) {
d_out[blockIdx.x] = sdata[0];
}
}
int next_power_of_two(int number) {
int result = 1;
while(result < number) {
result <<= 1;
}
return result;
}
void primitive_reduce_add(float *d_input, float *d_result, int input_size) {
const int SIZE_AS_POT = next_power_of_two(input_size);
int shared_size = sizeof(float) * SIZE_AS_POT;
reduce_add_kernel<<<1, SIZE_AS_POT, shared_size>>>(d_input, d_result, input_size);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z17reduce_add_kernelPKfPfi
.globl _Z17reduce_add_kernelPKfPfi
.p2align 8
.type _Z17reduce_add_kernelPKfPfi,@function
_Z17reduce_add_kernelPKfPfi:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x24
s_load_b32 s4, s[0:1], 0x10
s_mov_b32 s2, s15
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[1:2], null, s2, s3, v[0:1]
v_mov_b32_e32 v2, 0
v_cmp_gt_i32_e32 vcc_lo, s4, v1
s_and_saveexec_b32 s4, vcc_lo
s_cbranch_execz .LBB0_2
s_load_b64 s[6:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v1, vcc_lo, s6, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, s7, v2, vcc_lo
global_load_b32 v2, v[1:2], off
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s4
v_lshl_add_u32 v1, v0, 2, 0
s_cmp_lt_u32 s3, 2
s_waitcnt vmcnt(0)
ds_store_b32 v1, v2
s_waitcnt lgkmcnt(0)
s_barrier
s_branch .LBB0_4
.p2align 6
.LBB0_3:
s_or_b32 exec_lo, exec_lo, s5
s_waitcnt lgkmcnt(0)
s_barrier
s_cmp_lt_u32 s3, 4
s_mov_b32 s3, s4
.LBB0_4:
buffer_gl0_inv
s_cbranch_scc1 .LBB0_7
s_lshr_b32 s4, s3, 1
s_mov_b32 s5, exec_lo
v_cmpx_gt_u32_e64 s4, v0
s_cbranch_execz .LBB0_3
v_add_nc_u32_e32 v2, s4, v0
s_delay_alu instid0(VALU_DEP_1)
v_lshl_add_u32 v2, v2, 2, 0
ds_load_b32 v3, v1
ds_load_b32 v2, v2
s_waitcnt lgkmcnt(0)
v_add_f32_e32 v2, v3, v2
ds_store_b32 v1, v2
s_branch .LBB0_3
.LBB0_7:
s_mov_b32 s3, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB0_9
v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0
s_load_b64 s[0:1], s[0:1], 0x8
s_mov_b32 s3, 0
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[2:3], s[2:3], 2
ds_load_b32 v0, v0
s_waitcnt lgkmcnt(0)
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
global_store_b32 v1, v0, s[0:1]
.LBB0_9:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z17reduce_add_kernelPKfPfi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z17reduce_add_kernelPKfPfi, .Lfunc_end0-_Z17reduce_add_kernelPKfPfi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
- .offset: 144
.size: 4
.value_kind: hidden_dynamic_lds_size
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z17reduce_add_kernelPKfPfi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z17reduce_add_kernelPKfPfi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
__global__ void reduce_add_kernel(const float *d_in, float *d_out, int input_size) {
extern __shared__ float sdata[];
int index = threadIdx.x + blockDim.x * blockIdx.x;
int myId = threadIdx.x;
// Put whole block in shared memory
sdata[myId] = (index < input_size) ? d_in[index] : 0;
__syncthreads();
for (int i = blockDim.x / 2; i > 0; i>>=1) {
if (myId < i) {
sdata[myId] = sdata[myId] + sdata[myId+i];
}
__syncthreads();
}
if (myId == 0) {
d_out[blockIdx.x] = sdata[0];
}
}
int next_power_of_two(int number) {
int result = 1;
while(result < number) {
result <<= 1;
}
return result;
}
void primitive_reduce_add(float *d_input, float *d_result, int input_size) {
const int SIZE_AS_POT = next_power_of_two(input_size);
int shared_size = sizeof(float) * SIZE_AS_POT;
reduce_add_kernel<<<1, SIZE_AS_POT, shared_size>>>(d_input, d_result, input_size);
} | .text
.file "reduce.hip"
.globl _Z32__device_stub__reduce_add_kernelPKfPfi # -- Begin function _Z32__device_stub__reduce_add_kernelPKfPfi
.p2align 4, 0x90
.type _Z32__device_stub__reduce_add_kernelPKfPfi,@function
_Z32__device_stub__reduce_add_kernelPKfPfi: # @_Z32__device_stub__reduce_add_kernelPKfPfi
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z17reduce_add_kernelPKfPfi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z32__device_stub__reduce_add_kernelPKfPfi, .Lfunc_end0-_Z32__device_stub__reduce_add_kernelPKfPfi
.cfi_endproc
# -- End function
.globl _Z17next_power_of_twoi # -- Begin function _Z17next_power_of_twoi
.p2align 4, 0x90
.type _Z17next_power_of_twoi,@function
_Z17next_power_of_twoi: # @_Z17next_power_of_twoi
.cfi_startproc
# %bb.0:
movl $1, %ecx
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movl %ecx, %eax
leal (%rax,%rax), %ecx
cmpl %edi, %eax
jl .LBB1_1
# %bb.2:
# kill: def $eax killed $eax killed $rax
retq
.Lfunc_end1:
.size _Z17next_power_of_twoi, .Lfunc_end1-_Z17next_power_of_twoi
.cfi_endproc
# -- End function
.globl _Z20primitive_reduce_addPfS_i # -- Begin function _Z20primitive_reduce_addPfS_i
.p2align 4, 0x90
.type _Z20primitive_reduce_addPfS_i,@function
_Z20primitive_reduce_addPfS_i: # @_Z20primitive_reduce_addPfS_i
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $112, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl %edx, %ebx
movq %rsi, %r14
movq %rdi, %r15
movl $1, %ecx
.p2align 4, 0x90
.LBB2_1: # =>This Inner Loop Header: Depth=1
movl %ecx, %eax
leal (%rax,%rax), %ecx
cmpl %ebx, %eax
jl .LBB2_1
# %bb.2: # %_Z17next_power_of_twoi.exit
leal (,%rax,4), %ecx
movslq %ecx, %r8
movl %eax, %edx
movabsq $4294967296, %rdi # imm = 0x100000000
orq %rdi, %rdx
orq $1, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_4
# %bb.3:
movq %r15, 72(%rsp)
movq %r14, 64(%rsp)
movl %ebx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z17reduce_add_kernelPKfPfi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_4:
addq $112, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z20primitive_reduce_addPfS_i, .Lfunc_end2-_Z20primitive_reduce_addPfS_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z17reduce_add_kernelPKfPfi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z17reduce_add_kernelPKfPfi,@object # @_Z17reduce_add_kernelPKfPfi
.section .rodata,"a",@progbits
.globl _Z17reduce_add_kernelPKfPfi
.p2align 3, 0x0
_Z17reduce_add_kernelPKfPfi:
.quad _Z32__device_stub__reduce_add_kernelPKfPfi
.size _Z17reduce_add_kernelPKfPfi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z17reduce_add_kernelPKfPfi"
.size .L__unnamed_1, 28
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z32__device_stub__reduce_add_kernelPKfPfi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z17reduce_add_kernelPKfPfi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z17reduce_add_kernelPKfPfi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R0, RZ, RZ, RZ ; /* 0x000000ffff007224 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R2, R6, c[0x0][0x0], R7 ; /* 0x0000000006027a24 */
/* 0x001fca00078e0207 */
/*0060*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x170], PT ; /* 0x00005c0002007a0c */
/* 0x000fda0003f06270 */
/*0070*/ @!P0 MOV R3, 0x4 ; /* 0x0000000400038802 */
/* 0x000fca0000000f00 */
/*0080*/ @!P0 IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002028625 */
/* 0x000fca00078e0203 */
/*0090*/ @!P0 LDG.E R0, [R2.64] ; /* 0x0000000602008981 */
/* 0x000ea2000c1e1900 */
/*00a0*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */
/* 0x000fe20000000800 */
/*00b0*/ ISETP.NE.AND P0, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x000fe20003f05270 */
/*00c0*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */
/* 0x000fcc0008011604 */
/*00d0*/ ISETP.NE.AND P1, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */
/* 0x000fe2000bf25270 */
/*00e0*/ STS [R7.X4], R0 ; /* 0x0000000007007388 */
/* 0x0041e80000004800 */
/*00f0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000ff00000010000 */
/*0100*/ @!P1 BRA 0x1d0 ; /* 0x000000c000009947 */
/* 0x000fea0003800000 */
/*0110*/ SHF.L.U32 R0, R7, 0x2, RZ ; /* 0x0000000207007819 */
/* 0x001fe200000006ff */
/*0120*/ IMAD.U32 R3, RZ, RZ, UR4 ; /* 0x00000004ff037e24 */
/* 0x000fca000f8e00ff */
/*0130*/ ISETP.GE.AND P1, PT, R7, R3, PT ; /* 0x000000030700720c */
/* 0x000fda0003f26270 */
/*0140*/ @!P1 LEA R4, R3, R0, 0x2 ; /* 0x0000000003049211 */
/* 0x000fe200078e10ff */
/*0150*/ @!P1 LDS R2, [R7.X4] ; /* 0x0000000007029984 */
/* 0x000fe20000004800 */
/*0160*/ SHF.R.U32.HI R3, RZ, 0x1, R3 ; /* 0x00000001ff037819 */
/* 0x000fc60000011603 */
/*0170*/ @!P1 LDS R5, [R4] ; /* 0x0000000004059984 */
/* 0x000e240000000800 */
/*0180*/ @!P1 FADD R2, R2, R5 ; /* 0x0000000502029221 */
/* 0x001fca0000000000 */
/*0190*/ @!P1 STS [R7.X4], R2 ; /* 0x0000000207009388 */
/* 0x0001e80000004800 */
/*01a0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*01b0*/ ISETP.NE.AND P1, PT, R3, RZ, PT ; /* 0x000000ff0300720c */
/* 0x000fda0003f25270 */
/*01c0*/ @P1 BRA 0x130 ; /* 0xffffff6000001947 */
/* 0x001fea000383ffff */
/*01d0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x001fea0003800000 */
/*01e0*/ LDS R5, [RZ] ; /* 0x00000000ff057984 */
/* 0x000e220000000800 */
/*01f0*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fc800078e00ff */
/*0200*/ IMAD.WIDE.U32 R2, R6, R3, c[0x0][0x168] ; /* 0x00005a0006027625 */
/* 0x000fca00078e0003 */
/*0210*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101906 */
/*0220*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0230*/ BRA 0x230; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z17reduce_add_kernelPKfPfi
.globl _Z17reduce_add_kernelPKfPfi
.p2align 8
.type _Z17reduce_add_kernelPKfPfi,@function
_Z17reduce_add_kernelPKfPfi:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x24
s_load_b32 s4, s[0:1], 0x10
s_mov_b32 s2, s15
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[1:2], null, s2, s3, v[0:1]
v_mov_b32_e32 v2, 0
v_cmp_gt_i32_e32 vcc_lo, s4, v1
s_and_saveexec_b32 s4, vcc_lo
s_cbranch_execz .LBB0_2
s_load_b64 s[6:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v1, vcc_lo, s6, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, s7, v2, vcc_lo
global_load_b32 v2, v[1:2], off
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s4
v_lshl_add_u32 v1, v0, 2, 0
s_cmp_lt_u32 s3, 2
s_waitcnt vmcnt(0)
ds_store_b32 v1, v2
s_waitcnt lgkmcnt(0)
s_barrier
s_branch .LBB0_4
.p2align 6
.LBB0_3:
s_or_b32 exec_lo, exec_lo, s5
s_waitcnt lgkmcnt(0)
s_barrier
s_cmp_lt_u32 s3, 4
s_mov_b32 s3, s4
.LBB0_4:
buffer_gl0_inv
s_cbranch_scc1 .LBB0_7
s_lshr_b32 s4, s3, 1
s_mov_b32 s5, exec_lo
v_cmpx_gt_u32_e64 s4, v0
s_cbranch_execz .LBB0_3
v_add_nc_u32_e32 v2, s4, v0
s_delay_alu instid0(VALU_DEP_1)
v_lshl_add_u32 v2, v2, 2, 0
ds_load_b32 v3, v1
ds_load_b32 v2, v2
s_waitcnt lgkmcnt(0)
v_add_f32_e32 v2, v3, v2
ds_store_b32 v1, v2
s_branch .LBB0_3
.LBB0_7:
s_mov_b32 s3, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB0_9
v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0
s_load_b64 s[0:1], s[0:1], 0x8
s_mov_b32 s3, 0
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[2:3], s[2:3], 2
ds_load_b32 v0, v0
s_waitcnt lgkmcnt(0)
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
global_store_b32 v1, v0, s[0:1]
.LBB0_9:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z17reduce_add_kernelPKfPfi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z17reduce_add_kernelPKfPfi, .Lfunc_end0-_Z17reduce_add_kernelPKfPfi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
- .offset: 144
.size: 4
.value_kind: hidden_dynamic_lds_size
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z17reduce_add_kernelPKfPfi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z17reduce_add_kernelPKfPfi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0009d2de_00000000-6_reduce.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2031:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2031:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z17next_power_of_twoi
.type _Z17next_power_of_twoi, @function
_Z17next_power_of_twoi:
.LFB2027:
.cfi_startproc
endbr64
cmpl $1, %edi
jle .L6
movl $1, %eax
.L5:
addl %eax, %eax
cmpl %eax, %edi
jg .L5
ret
.L6:
movl $1, %eax
ret
.cfi_endproc
.LFE2027:
.size _Z17next_power_of_twoi, .-_Z17next_power_of_twoi
.globl _Z41__device_stub__Z17reduce_add_kernelPKfPfiPKfPfi
.type _Z41__device_stub__Z17reduce_add_kernelPKfPfiPKfPfi, @function
_Z41__device_stub__Z17reduce_add_kernelPKfPfiPKfPfi:
.LFB2053:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L12
.L8:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L13
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L12:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z17reduce_add_kernelPKfPfi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L8
.L13:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2053:
.size _Z41__device_stub__Z17reduce_add_kernelPKfPfiPKfPfi, .-_Z41__device_stub__Z17reduce_add_kernelPKfPfiPKfPfi
.globl _Z17reduce_add_kernelPKfPfi
.type _Z17reduce_add_kernelPKfPfi, @function
_Z17reduce_add_kernelPKfPfi:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z41__device_stub__Z17reduce_add_kernelPKfPfiPKfPfi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _Z17reduce_add_kernelPKfPfi, .-_Z17reduce_add_kernelPKfPfi
.globl _Z20primitive_reduce_addPfS_i
.type _Z20primitive_reduce_addPfS_i, @function
_Z20primitive_reduce_addPfS_i:
.LFB2028:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $32, %rsp
.cfi_def_cfa_offset 64
movq %rdi, %rbp
movq %rsi, %r12
movl %edx, %ebx
cmpl $1, %edx
jle .L20
movl $1, %r8d
.L18:
addl %r8d, %r8d
cmpl %r8d, %ebx
jg .L18
.L17:
movl %r8d, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 8(%rsp)
movl $1, 12(%rsp)
sall $2, %r8d
movl $0, %r9d
movslq %r8d, %r8
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L23
.L16:
addq $32, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L20:
.cfi_restore_state
movl $1, %r8d
jmp .L17
.L23:
movl %ebx, %edx
movq %r12, %rsi
movq %rbp, %rdi
call _Z41__device_stub__Z17reduce_add_kernelPKfPfiPKfPfi
jmp .L16
.cfi_endproc
.LFE2028:
.size _Z20primitive_reduce_addPfS_i, .-_Z20primitive_reduce_addPfS_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z17reduce_add_kernelPKfPfi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2056:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z17reduce_add_kernelPKfPfi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2056:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "reduce.hip"
.globl _Z32__device_stub__reduce_add_kernelPKfPfi # -- Begin function _Z32__device_stub__reduce_add_kernelPKfPfi
.p2align 4, 0x90
.type _Z32__device_stub__reduce_add_kernelPKfPfi,@function
_Z32__device_stub__reduce_add_kernelPKfPfi: # @_Z32__device_stub__reduce_add_kernelPKfPfi
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z17reduce_add_kernelPKfPfi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z32__device_stub__reduce_add_kernelPKfPfi, .Lfunc_end0-_Z32__device_stub__reduce_add_kernelPKfPfi
.cfi_endproc
# -- End function
.globl _Z17next_power_of_twoi # -- Begin function _Z17next_power_of_twoi
.p2align 4, 0x90
.type _Z17next_power_of_twoi,@function
_Z17next_power_of_twoi: # @_Z17next_power_of_twoi
.cfi_startproc
# %bb.0:
movl $1, %ecx
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movl %ecx, %eax
leal (%rax,%rax), %ecx
cmpl %edi, %eax
jl .LBB1_1
# %bb.2:
# kill: def $eax killed $eax killed $rax
retq
.Lfunc_end1:
.size _Z17next_power_of_twoi, .Lfunc_end1-_Z17next_power_of_twoi
.cfi_endproc
# -- End function
.globl _Z20primitive_reduce_addPfS_i # -- Begin function _Z20primitive_reduce_addPfS_i
.p2align 4, 0x90
.type _Z20primitive_reduce_addPfS_i,@function
_Z20primitive_reduce_addPfS_i: # @_Z20primitive_reduce_addPfS_i
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $112, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl %edx, %ebx
movq %rsi, %r14
movq %rdi, %r15
movl $1, %ecx
.p2align 4, 0x90
.LBB2_1: # =>This Inner Loop Header: Depth=1
movl %ecx, %eax
leal (%rax,%rax), %ecx
cmpl %ebx, %eax
jl .LBB2_1
# %bb.2: # %_Z17next_power_of_twoi.exit
leal (,%rax,4), %ecx
movslq %ecx, %r8
movl %eax, %edx
movabsq $4294967296, %rdi # imm = 0x100000000
orq %rdi, %rdx
orq $1, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_4
# %bb.3:
movq %r15, 72(%rsp)
movq %r14, 64(%rsp)
movl %ebx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z17reduce_add_kernelPKfPfi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_4:
addq $112, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z20primitive_reduce_addPfS_i, .Lfunc_end2-_Z20primitive_reduce_addPfS_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z17reduce_add_kernelPKfPfi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z17reduce_add_kernelPKfPfi,@object # @_Z17reduce_add_kernelPKfPfi
.section .rodata,"a",@progbits
.globl _Z17reduce_add_kernelPKfPfi
.p2align 3, 0x0
_Z17reduce_add_kernelPKfPfi:
.quad _Z32__device_stub__reduce_add_kernelPKfPfi
.size _Z17reduce_add_kernelPKfPfi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z17reduce_add_kernelPKfPfi"
.size .L__unnamed_1, 28
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z32__device_stub__reduce_add_kernelPKfPfi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z17reduce_add_kernelPKfPfi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void callOperation(int *a, int *res, int x, int n)
{
int tidx = blockDim.x * blockIdx.x + threadIdx.x;
int tidy = blockDim.y * blockIdx.y + threadIdx.y;
if (tidx >= n || tidy >= n) {
return;
}
int tid = tidx * n + tidy;
res[tid] = a[tid] * x;
} | code for sm_80
Function : _Z13callOperationPiS_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e280000002100 */
/*0030*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */
/* 0x000e680000002600 */
/*0040*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */
/* 0x000e620000002200 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0060*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x174], PT ; /* 0x00005d0000007a0c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R3, R2, c[0x0][0x4], R5 ; /* 0x0000010002037a24 */
/* 0x002fca00078e0205 */
/*0080*/ ISETP.GE.OR P0, PT, R3, c[0x0][0x174], P0 ; /* 0x00005d0003007a0c */
/* 0x000fda0000706670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*00b0*/ IMAD R0, R0, c[0x0][0x174], R3 ; /* 0x00005d0000007a24 */
/* 0x000fe200078e0203 */
/*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd00000000a00 */
/*00d0*/ IMAD.WIDE R2, R0, R5, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fcc00078e0205 */
/*00e0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1900 */
/*00f0*/ IMAD.WIDE R4, R0, R5, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fc800078e0205 */
/*0100*/ IMAD R7, R2, c[0x0][0x170], RZ ; /* 0x00005c0002077a24 */
/* 0x004fca00078e02ff */
/*0110*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*0120*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0130*/ BRA 0x130; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void callOperation(int *a, int *res, int x, int n)
{
int tidx = blockDim.x * blockIdx.x + threadIdx.x;
int tidy = blockDim.y * blockIdx.y + threadIdx.y;
if (tidx >= n || tidy >= n) {
return;
}
int tid = tidx * n + tidy;
res[tid] = a[tid] * x;
} | .file "tmpxft_001593b9_00000000-6_callOperation.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z37__device_stub__Z13callOperationPiS_iiPiS_ii
.type _Z37__device_stub__Z13callOperationPiS_iiPiS_ii, @function
_Z37__device_stub__Z13callOperationPiS_iiPiS_ii:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z13callOperationPiS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z37__device_stub__Z13callOperationPiS_iiPiS_ii, .-_Z37__device_stub__Z13callOperationPiS_iiPiS_ii
.globl _Z13callOperationPiS_ii
.type _Z13callOperationPiS_ii, @function
_Z13callOperationPiS_ii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z37__device_stub__Z13callOperationPiS_iiPiS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z13callOperationPiS_ii, .-_Z13callOperationPiS_ii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z13callOperationPiS_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z13callOperationPiS_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void callOperation(int *a, int *res, int x, int n)
{
int tidx = blockDim.x * blockIdx.x + threadIdx.x;
int tidy = blockDim.y * blockIdx.y + threadIdx.y;
if (tidx >= n || tidy >= n) {
return;
}
int tid = tidx * n + tidy;
res[tid] = a[tid] * x;
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void callOperation(int *a, int *res, int x, int n)
{
int tidx = blockDim.x * blockIdx.x + threadIdx.x;
int tidy = blockDim.y * blockIdx.y + threadIdx.y;
if (tidx >= n || tidy >= n) {
return;
}
int tid = tidx * n + tidy;
res[tid] = a[tid] * x;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void callOperation(int *a, int *res, int x, int n)
{
int tidx = blockDim.x * blockIdx.x + threadIdx.x;
int tidy = blockDim.y * blockIdx.y + threadIdx.y;
if (tidx >= n || tidy >= n) {
return;
}
int tid = tidx * n + tidy;
res[tid] = a[tid] * x;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z13callOperationPiS_ii
.globl _Z13callOperationPiS_ii
.p2align 8
.type _Z13callOperationPiS_ii,@function
_Z13callOperationPiS_ii:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x24
s_load_b32 s2, s[0:1], 0x14
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s3, 0xffff
s_lshr_b32 s3, s3, 16
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[0:1], null, s14, s4, v[2:3]
v_mad_u64_u32 v[1:2], null, s15, s3, v[3:4]
s_mov_b32 s3, exec_lo
v_max_i32_e32 v2, v0, v1
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s2, v2
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x0
v_mad_u64_u32 v[2:3], null, v0, s2, v[1:2]
s_load_b32 s0, s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[0:1], 2, v[2:3]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s4, v0
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
global_load_b32 v2, v[2:3], off
s_waitcnt vmcnt(0)
v_mul_lo_u32 v2, v2, s0
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13callOperationPiS_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z13callOperationPiS_ii, .Lfunc_end0-_Z13callOperationPiS_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13callOperationPiS_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13callOperationPiS_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void callOperation(int *a, int *res, int x, int n)
{
int tidx = blockDim.x * blockIdx.x + threadIdx.x;
int tidy = blockDim.y * blockIdx.y + threadIdx.y;
if (tidx >= n || tidy >= n) {
return;
}
int tid = tidx * n + tidy;
res[tid] = a[tid] * x;
} | .text
.file "callOperation.hip"
.globl _Z28__device_stub__callOperationPiS_ii # -- Begin function _Z28__device_stub__callOperationPiS_ii
.p2align 4, 0x90
.type _Z28__device_stub__callOperationPiS_ii,@function
_Z28__device_stub__callOperationPiS_ii: # @_Z28__device_stub__callOperationPiS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z13callOperationPiS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z28__device_stub__callOperationPiS_ii, .Lfunc_end0-_Z28__device_stub__callOperationPiS_ii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13callOperationPiS_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z13callOperationPiS_ii,@object # @_Z13callOperationPiS_ii
.section .rodata,"a",@progbits
.globl _Z13callOperationPiS_ii
.p2align 3, 0x0
_Z13callOperationPiS_ii:
.quad _Z28__device_stub__callOperationPiS_ii
.size _Z13callOperationPiS_ii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z13callOperationPiS_ii"
.size .L__unnamed_1, 24
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z28__device_stub__callOperationPiS_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z13callOperationPiS_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z13callOperationPiS_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e280000002100 */
/*0030*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */
/* 0x000e680000002600 */
/*0040*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */
/* 0x000e620000002200 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0060*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x174], PT ; /* 0x00005d0000007a0c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R3, R2, c[0x0][0x4], R5 ; /* 0x0000010002037a24 */
/* 0x002fca00078e0205 */
/*0080*/ ISETP.GE.OR P0, PT, R3, c[0x0][0x174], P0 ; /* 0x00005d0003007a0c */
/* 0x000fda0000706670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*00b0*/ IMAD R0, R0, c[0x0][0x174], R3 ; /* 0x00005d0000007a24 */
/* 0x000fe200078e0203 */
/*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd00000000a00 */
/*00d0*/ IMAD.WIDE R2, R0, R5, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fcc00078e0205 */
/*00e0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1900 */
/*00f0*/ IMAD.WIDE R4, R0, R5, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fc800078e0205 */
/*0100*/ IMAD R7, R2, c[0x0][0x170], RZ ; /* 0x00005c0002077a24 */
/* 0x004fca00078e02ff */
/*0110*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*0120*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0130*/ BRA 0x130; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z13callOperationPiS_ii
.globl _Z13callOperationPiS_ii
.p2align 8
.type _Z13callOperationPiS_ii,@function
_Z13callOperationPiS_ii:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x24
s_load_b32 s2, s[0:1], 0x14
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s3, 0xffff
s_lshr_b32 s3, s3, 16
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[0:1], null, s14, s4, v[2:3]
v_mad_u64_u32 v[1:2], null, s15, s3, v[3:4]
s_mov_b32 s3, exec_lo
v_max_i32_e32 v2, v0, v1
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s2, v2
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x0
v_mad_u64_u32 v[2:3], null, v0, s2, v[1:2]
s_load_b32 s0, s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[0:1], 2, v[2:3]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s4, v0
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
global_load_b32 v2, v[2:3], off
s_waitcnt vmcnt(0)
v_mul_lo_u32 v2, v2, s0
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13callOperationPiS_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z13callOperationPiS_ii, .Lfunc_end0-_Z13callOperationPiS_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13callOperationPiS_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13callOperationPiS_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_001593b9_00000000-6_callOperation.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z37__device_stub__Z13callOperationPiS_iiPiS_ii
.type _Z37__device_stub__Z13callOperationPiS_iiPiS_ii, @function
_Z37__device_stub__Z13callOperationPiS_iiPiS_ii:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z13callOperationPiS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z37__device_stub__Z13callOperationPiS_iiPiS_ii, .-_Z37__device_stub__Z13callOperationPiS_iiPiS_ii
.globl _Z13callOperationPiS_ii
.type _Z13callOperationPiS_ii, @function
_Z13callOperationPiS_ii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z37__device_stub__Z13callOperationPiS_iiPiS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z13callOperationPiS_ii, .-_Z13callOperationPiS_ii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z13callOperationPiS_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z13callOperationPiS_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "callOperation.hip"
.globl _Z28__device_stub__callOperationPiS_ii # -- Begin function _Z28__device_stub__callOperationPiS_ii
.p2align 4, 0x90
.type _Z28__device_stub__callOperationPiS_ii,@function
_Z28__device_stub__callOperationPiS_ii: # @_Z28__device_stub__callOperationPiS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z13callOperationPiS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z28__device_stub__callOperationPiS_ii, .Lfunc_end0-_Z28__device_stub__callOperationPiS_ii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13callOperationPiS_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z13callOperationPiS_ii,@object # @_Z13callOperationPiS_ii
.section .rodata,"a",@progbits
.globl _Z13callOperationPiS_ii
.p2align 3, 0x0
_Z13callOperationPiS_ii:
.quad _Z28__device_stub__callOperationPiS_ii
.size _Z13callOperationPiS_ii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z13callOperationPiS_ii"
.size .L__unnamed_1, 24
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z28__device_stub__callOperationPiS_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z13callOperationPiS_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21) {
comp = -0.0f / +0.0f - -0.0f / (var_2 + var_3);
float tmp_1 = log10f((-1.9931E36f / (+1.2540E-37f - -1.7856E28f)));
comp += tmp_1 - logf((var_4 / var_5 / var_6));
for (int i=0; i < var_1; ++i) {
float tmp_2 = fmodf(var_7 / -1.7238E-36f, (var_8 / -1.7777E-15f));
comp += tmp_2 + -1.8309E-36f - (var_9 + (-1.7789E-44f - var_10));
comp = (-1.5996E-9f / (var_11 + (-1.3240E36f * var_12 - var_13 - var_14)));
comp += +1.3123E-35f - var_15 + var_16;
}
if (comp == powf(-1.4214E-28f / var_17 - (-1.7691E-14f / var_18 * +1.7414E-42f), (var_19 - +1.8289E36f - +1.6617E-36f))) {
comp += atan2f(-1.0248E35f, -1.0499E-11f - var_20 / var_21 * -1.0678E-37f * +1.5149E-44f);
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22);
cudaDeviceSynchronize();
return 0;
} | .file "tmpxft_001ad569_00000000-6_test.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z11initPointerf
.type _Z11initPointerf, @function
_Z11initPointerf:
.LFB2057:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
movd %xmm0, %ebx
movl $40, %edi
call malloc@PLT
movq %rax, %rdx
leaq 40(%rax), %rcx
.L4:
movl %ebx, (%rdx)
addq $4, %rdx
cmpq %rcx, %rdx
jne .L4
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size _Z11initPointerf, .-_Z11initPointerf
.globl _Z46__device_stub__Z7computefifffffffffffffffffffffiffffffffffffffffffff
.type _Z46__device_stub__Z7computefifffffffffffffffffffffiffffffffffffffffffff, @function
_Z46__device_stub__Z7computefifffffffffffffffffffffiffffffffffffffffffff:
.LFB2083:
.cfi_startproc
endbr64
subq $312, %rsp
.cfi_def_cfa_offset 320
movss %xmm0, 44(%rsp)
movl %edi, 40(%rsp)
movss %xmm1, 36(%rsp)
movss %xmm2, 32(%rsp)
movss %xmm3, 28(%rsp)
movss %xmm4, 24(%rsp)
movss %xmm5, 20(%rsp)
movss %xmm6, 16(%rsp)
movss %xmm7, 12(%rsp)
movq %fs:40, %rax
movq %rax, 296(%rsp)
xorl %eax, %eax
leaq 44(%rsp), %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rax
movq %rax, 120(%rsp)
leaq 36(%rsp), %rax
movq %rax, 128(%rsp)
leaq 32(%rsp), %rax
movq %rax, 136(%rsp)
leaq 28(%rsp), %rax
movq %rax, 144(%rsp)
leaq 24(%rsp), %rax
movq %rax, 152(%rsp)
leaq 20(%rsp), %rax
movq %rax, 160(%rsp)
leaq 16(%rsp), %rax
movq %rax, 168(%rsp)
leaq 12(%rsp), %rax
movq %rax, 176(%rsp)
leaq 320(%rsp), %rax
movq %rax, 184(%rsp)
leaq 328(%rsp), %rax
movq %rax, 192(%rsp)
leaq 336(%rsp), %rax
movq %rax, 200(%rsp)
leaq 344(%rsp), %rax
movq %rax, 208(%rsp)
leaq 352(%rsp), %rax
movq %rax, 216(%rsp)
leaq 360(%rsp), %rax
movq %rax, 224(%rsp)
leaq 368(%rsp), %rax
movq %rax, 232(%rsp)
leaq 376(%rsp), %rax
movq %rax, 240(%rsp)
leaq 384(%rsp), %rax
movq %rax, 248(%rsp)
leaq 392(%rsp), %rax
movq %rax, 256(%rsp)
leaq 400(%rsp), %rax
movq %rax, 264(%rsp)
leaq 408(%rsp), %rax
movq %rax, 272(%rsp)
leaq 416(%rsp), %rax
movq %rax, 280(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L11
.L7:
movq 296(%rsp), %rax
subq %fs:40, %rax
jne .L12
addq $312, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 328
pushq 56(%rsp)
.cfi_def_cfa_offset 336
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z7computefiffffffffffffffffffff(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 320
jmp .L7
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z46__device_stub__Z7computefifffffffffffffffffffffiffffffffffffffffffff, .-_Z46__device_stub__Z7computefifffffffffffffffffffffiffffffffffffffffffff
.globl _Z7computefiffffffffffffffffffff
.type _Z7computefiffffffffffffffffffff, @function
_Z7computefiffffffffffffffffffff:
.LFB2084:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movss 224(%rsp), %xmm8
movss %xmm8, 96(%rsp)
movss 216(%rsp), %xmm8
movss %xmm8, 88(%rsp)
movss 208(%rsp), %xmm8
movss %xmm8, 80(%rsp)
movss 200(%rsp), %xmm8
movss %xmm8, 72(%rsp)
movss 192(%rsp), %xmm8
movss %xmm8, 64(%rsp)
movss 184(%rsp), %xmm8
movss %xmm8, 56(%rsp)
movss 176(%rsp), %xmm8
movss %xmm8, 48(%rsp)
movss 168(%rsp), %xmm8
movss %xmm8, 40(%rsp)
movss 160(%rsp), %xmm8
movss %xmm8, 32(%rsp)
movss 152(%rsp), %xmm8
movss %xmm8, 24(%rsp)
movss 144(%rsp), %xmm8
movss %xmm8, 16(%rsp)
movss 136(%rsp), %xmm8
movss %xmm8, 8(%rsp)
movss 128(%rsp), %xmm8
movss %xmm8, (%rsp)
call _Z46__device_stub__Z7computefifffffffffffffffffffffiffffffffffffffffffff
addq $120, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z7computefiffffffffffffffffffff, .-_Z7computefiffffffffffffffffffff
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $216, %rsp
.cfi_def_cfa_offset 240
movq %rsi, %rbx
movq 8(%rsi), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 168(%rsp)
movq 16(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %rbp
movq 24(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 160(%rsp)
movq 32(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 152(%rsp)
movq 40(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 144(%rsp)
movq 48(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 136(%rsp)
movq 56(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 128(%rsp)
movq 64(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 120(%rsp)
movq 72(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 112(%rsp)
movq 80(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 104(%rsp)
movq 88(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 96(%rsp)
movq 96(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 88(%rsp)
movq 104(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 80(%rsp)
movq 112(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 72(%rsp)
movq 120(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 64(%rsp)
movq 128(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 56(%rsp)
movq 136(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 48(%rsp)
movq 144(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 40(%rsp)
movq 152(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 32(%rsp)
movq 160(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 24(%rsp)
movq 168(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 16(%rsp)
movq 176(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 8(%rsp)
movl $1, 196(%rsp)
movl $1, 200(%rsp)
movl $1, 184(%rsp)
movl $1, 188(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 196(%rsp), %rdx
movl $1, %ecx
movq 184(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L18
.L16:
call cudaDeviceSynchronize@PLT
movl $0, %eax
addq $216, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L18:
.cfi_restore_state
pxor %xmm0, %xmm0
cvtsd2ss 168(%rsp), %xmm0
subq $112, %rsp
.cfi_def_cfa_offset 352
pxor %xmm1, %xmm1
cvtsd2ss 120(%rsp), %xmm1
movss %xmm1, 96(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 128(%rsp), %xmm1
movss %xmm1, 88(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 136(%rsp), %xmm1
movss %xmm1, 80(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 144(%rsp), %xmm1
movss %xmm1, 72(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 152(%rsp), %xmm1
movss %xmm1, 64(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 160(%rsp), %xmm1
movss %xmm1, 56(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 168(%rsp), %xmm1
movss %xmm1, 48(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 176(%rsp), %xmm1
movss %xmm1, 40(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 184(%rsp), %xmm1
movss %xmm1, 32(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 192(%rsp), %xmm1
movss %xmm1, 24(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 200(%rsp), %xmm1
movss %xmm1, 16(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 208(%rsp), %xmm1
movss %xmm1, 8(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 216(%rsp), %xmm1
movss %xmm1, (%rsp)
pxor %xmm7, %xmm7
cvtsd2ss 224(%rsp), %xmm7
pxor %xmm6, %xmm6
cvtsd2ss 232(%rsp), %xmm6
pxor %xmm5, %xmm5
cvtsd2ss 240(%rsp), %xmm5
pxor %xmm4, %xmm4
cvtsd2ss 248(%rsp), %xmm4
pxor %xmm3, %xmm3
cvtsd2ss 256(%rsp), %xmm3
pxor %xmm2, %xmm2
cvtsd2ss 264(%rsp), %xmm2
pxor %xmm1, %xmm1
cvtsd2ss 272(%rsp), %xmm1
movl %ebp, %edi
call _Z46__device_stub__Z7computefifffffffffffffffffffffiffffffffffffffffffff
addq $112, %rsp
.cfi_def_cfa_offset 240
jmp .L16
.cfi_endproc
.LFE2058:
.size main, .-main
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z7computefiffffffffffffffffffff"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z7computefiffffffffffffffffffff(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21) {
comp = -0.0f / +0.0f - -0.0f / (var_2 + var_3);
float tmp_1 = log10f((-1.9931E36f / (+1.2540E-37f - -1.7856E28f)));
comp += tmp_1 - logf((var_4 / var_5 / var_6));
for (int i=0; i < var_1; ++i) {
float tmp_2 = fmodf(var_7 / -1.7238E-36f, (var_8 / -1.7777E-15f));
comp += tmp_2 + -1.8309E-36f - (var_9 + (-1.7789E-44f - var_10));
comp = (-1.5996E-9f / (var_11 + (-1.3240E36f * var_12 - var_13 - var_14)));
comp += +1.3123E-35f - var_15 + var_16;
}
if (comp == powf(-1.4214E-28f / var_17 - (-1.7691E-14f / var_18 * +1.7414E-42f), (var_19 - +1.8289E36f - +1.6617E-36f))) {
comp += atan2f(-1.0248E35f, -1.0499E-11f - var_20 / var_21 * -1.0678E-37f * +1.5149E-44f);
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22);
cudaDeviceSynchronize();
return 0;
} | /* This is a automatically generated test. Do not modify */
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21) {
comp = -0.0f / +0.0f - -0.0f / (var_2 + var_3);
float tmp_1 = log10f((-1.9931E36f / (+1.2540E-37f - -1.7856E28f)));
comp += tmp_1 - logf((var_4 / var_5 / var_6));
for (int i=0; i < var_1; ++i) {
float tmp_2 = fmodf(var_7 / -1.7238E-36f, (var_8 / -1.7777E-15f));
comp += tmp_2 + -1.8309E-36f - (var_9 + (-1.7789E-44f - var_10));
comp = (-1.5996E-9f / (var_11 + (-1.3240E36f * var_12 - var_13 - var_14)));
comp += +1.3123E-35f - var_15 + var_16;
}
if (comp == powf(-1.4214E-28f / var_17 - (-1.7691E-14f / var_18 * +1.7414E-42f), (var_19 - +1.8289E36f - +1.6617E-36f))) {
comp += atan2f(-1.0248E35f, -1.0499E-11f - var_20 / var_21 * -1.0678E-37f * +1.5149E-44f);
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22);
hipDeviceSynchronize();
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /* This is a automatically generated test. Do not modify */
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21) {
comp = -0.0f / +0.0f - -0.0f / (var_2 + var_3);
float tmp_1 = log10f((-1.9931E36f / (+1.2540E-37f - -1.7856E28f)));
comp += tmp_1 - logf((var_4 / var_5 / var_6));
for (int i=0; i < var_1; ++i) {
float tmp_2 = fmodf(var_7 / -1.7238E-36f, (var_8 / -1.7777E-15f));
comp += tmp_2 + -1.8309E-36f - (var_9 + (-1.7789E-44f - var_10));
comp = (-1.5996E-9f / (var_11 + (-1.3240E36f * var_12 - var_13 - var_14)));
comp += +1.3123E-35f - var_15 + var_16;
}
if (comp == powf(-1.4214E-28f / var_17 - (-1.7691E-14f / var_18 * +1.7414E-42f), (var_19 - +1.8289E36f - +1.6617E-36f))) {
comp += atan2f(-1.0248E35f, -1.0499E-11f - var_20 / var_21 * -1.0678E-37f * +1.5149E-44f);
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22);
hipDeviceSynchronize();
return 0;
} | .text
.file "test.hip"
.globl _Z22__device_stub__computefiffffffffffffffffffff # -- Begin function _Z22__device_stub__computefiffffffffffffffffffff
.p2align 4, 0x90
.type _Z22__device_stub__computefiffffffffffffffffffff,@function
_Z22__device_stub__computefiffffffffffffffffffff: # @_Z22__device_stub__computefiffffffffffffffffffff
.cfi_startproc
# %bb.0:
subq $280, %rsp # imm = 0x118
.cfi_def_cfa_offset 288
movss %xmm0, 44(%rsp)
movl %edi, 40(%rsp)
movss %xmm1, 36(%rsp)
movss %xmm2, 32(%rsp)
movss %xmm3, 28(%rsp)
movss %xmm4, 24(%rsp)
movss %xmm5, 20(%rsp)
movss %xmm6, 16(%rsp)
movss %xmm7, 12(%rsp)
leaq 44(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rax
movq %rax, 104(%rsp)
leaq 36(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 28(%rsp), %rax
movq %rax, 128(%rsp)
leaq 24(%rsp), %rax
movq %rax, 136(%rsp)
leaq 20(%rsp), %rax
movq %rax, 144(%rsp)
leaq 16(%rsp), %rax
movq %rax, 152(%rsp)
leaq 12(%rsp), %rax
movq %rax, 160(%rsp)
leaq 288(%rsp), %rax
movq %rax, 168(%rsp)
leaq 296(%rsp), %rax
movq %rax, 176(%rsp)
leaq 304(%rsp), %rax
movq %rax, 184(%rsp)
leaq 312(%rsp), %rax
movq %rax, 192(%rsp)
leaq 320(%rsp), %rax
movq %rax, 200(%rsp)
leaq 328(%rsp), %rax
movq %rax, 208(%rsp)
leaq 336(%rsp), %rax
movq %rax, 216(%rsp)
leaq 344(%rsp), %rax
movq %rax, 224(%rsp)
leaq 352(%rsp), %rax
movq %rax, 232(%rsp)
leaq 360(%rsp), %rax
movq %rax, 240(%rsp)
leaq 368(%rsp), %rax
movq %rax, 248(%rsp)
leaq 376(%rsp), %rax
movq %rax, 256(%rsp)
leaq 384(%rsp), %rax
movq %rax, 264(%rsp)
leaq 80(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 56(%rsp), %rdx
leaq 48(%rsp), %rcx
callq __hipPopCallConfiguration
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
movq 64(%rsp), %rcx
movl 72(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z7computefiffffffffffffffffffff, %edi
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $296, %rsp # imm = 0x128
.cfi_adjust_cfa_offset -296
retq
.Lfunc_end0:
.size _Z22__device_stub__computefiffffffffffffffffffff, .Lfunc_end0-_Z22__device_stub__computefiffffffffffffffffffff
.cfi_endproc
# -- End function
.globl _Z11initPointerf # -- Begin function _Z11initPointerf
.p2align 4, 0x90
.type _Z11initPointerf,@function
_Z11initPointerf: # @_Z11initPointerf
.cfi_startproc
# %bb.0:
pushq %rax
.cfi_def_cfa_offset 16
movss %xmm0, 4(%rsp) # 4-byte Spill
movl $40, %edi
callq malloc
movss 4(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movss %xmm0, (%rax,%rcx,4)
incq %rcx
cmpq $10, %rcx
jne .LBB1_1
# %bb.2:
popq %rcx
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z11initPointerf, .Lfunc_end1-_Z11initPointerf
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $280, %rsp # imm = 0x118
.cfi_def_cfa_offset 304
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movq %rsi, %r14
movq 8(%rsi), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 272(%rsp) # 8-byte Spill
movq 16(%r14), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %rbx
movq 24(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 264(%rsp) # 8-byte Spill
movq 32(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 256(%rsp) # 8-byte Spill
movq 40(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 248(%rsp) # 8-byte Spill
movq 48(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 144(%rsp) # 8-byte Spill
movq 56(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 136(%rsp) # 8-byte Spill
movq 64(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 128(%rsp) # 8-byte Spill
movq 72(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 120(%rsp) # 8-byte Spill
movq 80(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 112(%rsp) # 8-byte Spill
movq 88(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 240(%rsp) # 8-byte Spill
movq 96(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 232(%rsp) # 8-byte Spill
movq 104(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 224(%rsp) # 8-byte Spill
movq 112(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 216(%rsp) # 8-byte Spill
movq 120(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 208(%rsp) # 8-byte Spill
movq 128(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 200(%rsp) # 8-byte Spill
movq 136(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 192(%rsp) # 8-byte Spill
movq 144(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 184(%rsp) # 8-byte Spill
movq 152(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 176(%rsp) # 8-byte Spill
movq 160(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 168(%rsp) # 8-byte Spill
movq 168(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 160(%rsp) # 8-byte Spill
movq 176(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 152(%rsp) # 8-byte Spill
movabsq $4294967297, %rdi # imm = 0x100000001
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_2
# %bb.1:
movsd 152(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm8
movsd 160(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm9
movsd 168(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm10
movsd 176(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm11
movsd 184(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm12
movsd 192(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm13
movsd 200(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm14
movsd 208(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm15
movsd 216(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm4
movsd 224(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm5
movsd 232(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm6
movsd 240(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm7
movsd 112(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 112(%rsp) # 4-byte Spill
movsd 120(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 120(%rsp) # 4-byte Spill
movsd 128(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 128(%rsp) # 4-byte Spill
movsd 136(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 136(%rsp) # 4-byte Spill
movsd 144(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 144(%rsp) # 4-byte Spill
movsd 248(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm3
movsd 256(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm2
movsd 264(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm1
movsd 272(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm8, 96(%rsp)
movss %xmm9, 88(%rsp)
movss %xmm10, 80(%rsp)
movss %xmm11, 72(%rsp)
movss %xmm12, 64(%rsp)
movss %xmm13, 56(%rsp)
movss %xmm14, 48(%rsp)
movss %xmm15, 40(%rsp)
movss %xmm4, 32(%rsp)
movss %xmm5, 24(%rsp)
movss %xmm6, 16(%rsp)
movss %xmm7, 8(%rsp)
movss 112(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, (%rsp)
movl %ebx, %edi
movss 144(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss 136(%rsp), %xmm5 # 4-byte Reload
# xmm5 = mem[0],zero,zero,zero
movss 128(%rsp), %xmm6 # 4-byte Reload
# xmm6 = mem[0],zero,zero,zero
movss 120(%rsp), %xmm7 # 4-byte Reload
# xmm7 = mem[0],zero,zero,zero
callq _Z22__device_stub__computefiffffffffffffffffffff
.LBB2_2:
callq hipDeviceSynchronize
xorl %eax, %eax
addq $280, %rsp # imm = 0x118
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z7computefiffffffffffffffffffff, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z7computefiffffffffffffffffffff,@object # @_Z7computefiffffffffffffffffffff
.section .rodata,"a",@progbits
.globl _Z7computefiffffffffffffffffffff
.p2align 3, 0x0
_Z7computefiffffffffffffffffffff:
.quad _Z22__device_stub__computefiffffffffffffffffffff
.size _Z7computefiffffffffffffffffffff, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z7computefiffffffffffffffffffff"
.size .L__unnamed_1, 33
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z22__device_stub__computefiffffffffffffffffffff
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z7computefiffffffffffffffffffff
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_001ad569_00000000-6_test.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z11initPointerf
.type _Z11initPointerf, @function
_Z11initPointerf:
.LFB2057:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
movd %xmm0, %ebx
movl $40, %edi
call malloc@PLT
movq %rax, %rdx
leaq 40(%rax), %rcx
.L4:
movl %ebx, (%rdx)
addq $4, %rdx
cmpq %rcx, %rdx
jne .L4
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size _Z11initPointerf, .-_Z11initPointerf
.globl _Z46__device_stub__Z7computefifffffffffffffffffffffiffffffffffffffffffff
.type _Z46__device_stub__Z7computefifffffffffffffffffffffiffffffffffffffffffff, @function
_Z46__device_stub__Z7computefifffffffffffffffffffffiffffffffffffffffffff:
.LFB2083:
.cfi_startproc
endbr64
subq $312, %rsp
.cfi_def_cfa_offset 320
movss %xmm0, 44(%rsp)
movl %edi, 40(%rsp)
movss %xmm1, 36(%rsp)
movss %xmm2, 32(%rsp)
movss %xmm3, 28(%rsp)
movss %xmm4, 24(%rsp)
movss %xmm5, 20(%rsp)
movss %xmm6, 16(%rsp)
movss %xmm7, 12(%rsp)
movq %fs:40, %rax
movq %rax, 296(%rsp)
xorl %eax, %eax
leaq 44(%rsp), %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rax
movq %rax, 120(%rsp)
leaq 36(%rsp), %rax
movq %rax, 128(%rsp)
leaq 32(%rsp), %rax
movq %rax, 136(%rsp)
leaq 28(%rsp), %rax
movq %rax, 144(%rsp)
leaq 24(%rsp), %rax
movq %rax, 152(%rsp)
leaq 20(%rsp), %rax
movq %rax, 160(%rsp)
leaq 16(%rsp), %rax
movq %rax, 168(%rsp)
leaq 12(%rsp), %rax
movq %rax, 176(%rsp)
leaq 320(%rsp), %rax
movq %rax, 184(%rsp)
leaq 328(%rsp), %rax
movq %rax, 192(%rsp)
leaq 336(%rsp), %rax
movq %rax, 200(%rsp)
leaq 344(%rsp), %rax
movq %rax, 208(%rsp)
leaq 352(%rsp), %rax
movq %rax, 216(%rsp)
leaq 360(%rsp), %rax
movq %rax, 224(%rsp)
leaq 368(%rsp), %rax
movq %rax, 232(%rsp)
leaq 376(%rsp), %rax
movq %rax, 240(%rsp)
leaq 384(%rsp), %rax
movq %rax, 248(%rsp)
leaq 392(%rsp), %rax
movq %rax, 256(%rsp)
leaq 400(%rsp), %rax
movq %rax, 264(%rsp)
leaq 408(%rsp), %rax
movq %rax, 272(%rsp)
leaq 416(%rsp), %rax
movq %rax, 280(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L11
.L7:
movq 296(%rsp), %rax
subq %fs:40, %rax
jne .L12
addq $312, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 328
pushq 56(%rsp)
.cfi_def_cfa_offset 336
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z7computefiffffffffffffffffffff(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 320
jmp .L7
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z46__device_stub__Z7computefifffffffffffffffffffffiffffffffffffffffffff, .-_Z46__device_stub__Z7computefifffffffffffffffffffffiffffffffffffffffffff
.globl _Z7computefiffffffffffffffffffff
.type _Z7computefiffffffffffffffffffff, @function
_Z7computefiffffffffffffffffffff:
.LFB2084:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movss 224(%rsp), %xmm8
movss %xmm8, 96(%rsp)
movss 216(%rsp), %xmm8
movss %xmm8, 88(%rsp)
movss 208(%rsp), %xmm8
movss %xmm8, 80(%rsp)
movss 200(%rsp), %xmm8
movss %xmm8, 72(%rsp)
movss 192(%rsp), %xmm8
movss %xmm8, 64(%rsp)
movss 184(%rsp), %xmm8
movss %xmm8, 56(%rsp)
movss 176(%rsp), %xmm8
movss %xmm8, 48(%rsp)
movss 168(%rsp), %xmm8
movss %xmm8, 40(%rsp)
movss 160(%rsp), %xmm8
movss %xmm8, 32(%rsp)
movss 152(%rsp), %xmm8
movss %xmm8, 24(%rsp)
movss 144(%rsp), %xmm8
movss %xmm8, 16(%rsp)
movss 136(%rsp), %xmm8
movss %xmm8, 8(%rsp)
movss 128(%rsp), %xmm8
movss %xmm8, (%rsp)
call _Z46__device_stub__Z7computefifffffffffffffffffffffiffffffffffffffffffff
addq $120, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z7computefiffffffffffffffffffff, .-_Z7computefiffffffffffffffffffff
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $216, %rsp
.cfi_def_cfa_offset 240
movq %rsi, %rbx
movq 8(%rsi), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 168(%rsp)
movq 16(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %rbp
movq 24(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 160(%rsp)
movq 32(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 152(%rsp)
movq 40(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 144(%rsp)
movq 48(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 136(%rsp)
movq 56(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 128(%rsp)
movq 64(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 120(%rsp)
movq 72(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 112(%rsp)
movq 80(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 104(%rsp)
movq 88(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 96(%rsp)
movq 96(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 88(%rsp)
movq 104(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 80(%rsp)
movq 112(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 72(%rsp)
movq 120(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 64(%rsp)
movq 128(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 56(%rsp)
movq 136(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 48(%rsp)
movq 144(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 40(%rsp)
movq 152(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 32(%rsp)
movq 160(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 24(%rsp)
movq 168(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 16(%rsp)
movq 176(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 8(%rsp)
movl $1, 196(%rsp)
movl $1, 200(%rsp)
movl $1, 184(%rsp)
movl $1, 188(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 196(%rsp), %rdx
movl $1, %ecx
movq 184(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L18
.L16:
call cudaDeviceSynchronize@PLT
movl $0, %eax
addq $216, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L18:
.cfi_restore_state
pxor %xmm0, %xmm0
cvtsd2ss 168(%rsp), %xmm0
subq $112, %rsp
.cfi_def_cfa_offset 352
pxor %xmm1, %xmm1
cvtsd2ss 120(%rsp), %xmm1
movss %xmm1, 96(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 128(%rsp), %xmm1
movss %xmm1, 88(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 136(%rsp), %xmm1
movss %xmm1, 80(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 144(%rsp), %xmm1
movss %xmm1, 72(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 152(%rsp), %xmm1
movss %xmm1, 64(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 160(%rsp), %xmm1
movss %xmm1, 56(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 168(%rsp), %xmm1
movss %xmm1, 48(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 176(%rsp), %xmm1
movss %xmm1, 40(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 184(%rsp), %xmm1
movss %xmm1, 32(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 192(%rsp), %xmm1
movss %xmm1, 24(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 200(%rsp), %xmm1
movss %xmm1, 16(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 208(%rsp), %xmm1
movss %xmm1, 8(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 216(%rsp), %xmm1
movss %xmm1, (%rsp)
pxor %xmm7, %xmm7
cvtsd2ss 224(%rsp), %xmm7
pxor %xmm6, %xmm6
cvtsd2ss 232(%rsp), %xmm6
pxor %xmm5, %xmm5
cvtsd2ss 240(%rsp), %xmm5
pxor %xmm4, %xmm4
cvtsd2ss 248(%rsp), %xmm4
pxor %xmm3, %xmm3
cvtsd2ss 256(%rsp), %xmm3
pxor %xmm2, %xmm2
cvtsd2ss 264(%rsp), %xmm2
pxor %xmm1, %xmm1
cvtsd2ss 272(%rsp), %xmm1
movl %ebp, %edi
call _Z46__device_stub__Z7computefifffffffffffffffffffffiffffffffffffffffffff
addq $112, %rsp
.cfi_def_cfa_offset 240
jmp .L16
.cfi_endproc
.LFE2058:
.size main, .-main
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z7computefiffffffffffffffffffff"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z7computefiffffffffffffffffffff(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "test.hip"
.globl _Z22__device_stub__computefiffffffffffffffffffff # -- Begin function _Z22__device_stub__computefiffffffffffffffffffff
.p2align 4, 0x90
.type _Z22__device_stub__computefiffffffffffffffffffff,@function
_Z22__device_stub__computefiffffffffffffffffffff: # @_Z22__device_stub__computefiffffffffffffffffffff
.cfi_startproc
# %bb.0:
subq $280, %rsp # imm = 0x118
.cfi_def_cfa_offset 288
movss %xmm0, 44(%rsp)
movl %edi, 40(%rsp)
movss %xmm1, 36(%rsp)
movss %xmm2, 32(%rsp)
movss %xmm3, 28(%rsp)
movss %xmm4, 24(%rsp)
movss %xmm5, 20(%rsp)
movss %xmm6, 16(%rsp)
movss %xmm7, 12(%rsp)
leaq 44(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rax
movq %rax, 104(%rsp)
leaq 36(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 28(%rsp), %rax
movq %rax, 128(%rsp)
leaq 24(%rsp), %rax
movq %rax, 136(%rsp)
leaq 20(%rsp), %rax
movq %rax, 144(%rsp)
leaq 16(%rsp), %rax
movq %rax, 152(%rsp)
leaq 12(%rsp), %rax
movq %rax, 160(%rsp)
leaq 288(%rsp), %rax
movq %rax, 168(%rsp)
leaq 296(%rsp), %rax
movq %rax, 176(%rsp)
leaq 304(%rsp), %rax
movq %rax, 184(%rsp)
leaq 312(%rsp), %rax
movq %rax, 192(%rsp)
leaq 320(%rsp), %rax
movq %rax, 200(%rsp)
leaq 328(%rsp), %rax
movq %rax, 208(%rsp)
leaq 336(%rsp), %rax
movq %rax, 216(%rsp)
leaq 344(%rsp), %rax
movq %rax, 224(%rsp)
leaq 352(%rsp), %rax
movq %rax, 232(%rsp)
leaq 360(%rsp), %rax
movq %rax, 240(%rsp)
leaq 368(%rsp), %rax
movq %rax, 248(%rsp)
leaq 376(%rsp), %rax
movq %rax, 256(%rsp)
leaq 384(%rsp), %rax
movq %rax, 264(%rsp)
leaq 80(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 56(%rsp), %rdx
leaq 48(%rsp), %rcx
callq __hipPopCallConfiguration
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
movq 64(%rsp), %rcx
movl 72(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z7computefiffffffffffffffffffff, %edi
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $296, %rsp # imm = 0x128
.cfi_adjust_cfa_offset -296
retq
.Lfunc_end0:
.size _Z22__device_stub__computefiffffffffffffffffffff, .Lfunc_end0-_Z22__device_stub__computefiffffffffffffffffffff
.cfi_endproc
# -- End function
.globl _Z11initPointerf # -- Begin function _Z11initPointerf
.p2align 4, 0x90
.type _Z11initPointerf,@function
_Z11initPointerf: # @_Z11initPointerf
.cfi_startproc
# %bb.0:
pushq %rax
.cfi_def_cfa_offset 16
movss %xmm0, 4(%rsp) # 4-byte Spill
movl $40, %edi
callq malloc
movss 4(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movss %xmm0, (%rax,%rcx,4)
incq %rcx
cmpq $10, %rcx
jne .LBB1_1
# %bb.2:
popq %rcx
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z11initPointerf, .Lfunc_end1-_Z11initPointerf
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $280, %rsp # imm = 0x118
.cfi_def_cfa_offset 304
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movq %rsi, %r14
movq 8(%rsi), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 272(%rsp) # 8-byte Spill
movq 16(%r14), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %rbx
movq 24(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 264(%rsp) # 8-byte Spill
movq 32(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 256(%rsp) # 8-byte Spill
movq 40(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 248(%rsp) # 8-byte Spill
movq 48(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 144(%rsp) # 8-byte Spill
movq 56(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 136(%rsp) # 8-byte Spill
movq 64(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 128(%rsp) # 8-byte Spill
movq 72(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 120(%rsp) # 8-byte Spill
movq 80(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 112(%rsp) # 8-byte Spill
movq 88(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 240(%rsp) # 8-byte Spill
movq 96(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 232(%rsp) # 8-byte Spill
movq 104(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 224(%rsp) # 8-byte Spill
movq 112(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 216(%rsp) # 8-byte Spill
movq 120(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 208(%rsp) # 8-byte Spill
movq 128(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 200(%rsp) # 8-byte Spill
movq 136(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 192(%rsp) # 8-byte Spill
movq 144(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 184(%rsp) # 8-byte Spill
movq 152(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 176(%rsp) # 8-byte Spill
movq 160(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 168(%rsp) # 8-byte Spill
movq 168(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 160(%rsp) # 8-byte Spill
movq 176(%r14), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 152(%rsp) # 8-byte Spill
movabsq $4294967297, %rdi # imm = 0x100000001
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_2
# %bb.1:
movsd 152(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm8
movsd 160(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm9
movsd 168(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm10
movsd 176(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm11
movsd 184(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm12
movsd 192(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm13
movsd 200(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm14
movsd 208(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm15
movsd 216(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm4
movsd 224(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm5
movsd 232(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm6
movsd 240(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm7
movsd 112(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 112(%rsp) # 4-byte Spill
movsd 120(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 120(%rsp) # 4-byte Spill
movsd 128(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 128(%rsp) # 4-byte Spill
movsd 136(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 136(%rsp) # 4-byte Spill
movsd 144(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 144(%rsp) # 4-byte Spill
movsd 248(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm3
movsd 256(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm2
movsd 264(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm1
movsd 272(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm8, 96(%rsp)
movss %xmm9, 88(%rsp)
movss %xmm10, 80(%rsp)
movss %xmm11, 72(%rsp)
movss %xmm12, 64(%rsp)
movss %xmm13, 56(%rsp)
movss %xmm14, 48(%rsp)
movss %xmm15, 40(%rsp)
movss %xmm4, 32(%rsp)
movss %xmm5, 24(%rsp)
movss %xmm6, 16(%rsp)
movss %xmm7, 8(%rsp)
movss 112(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, (%rsp)
movl %ebx, %edi
movss 144(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss 136(%rsp), %xmm5 # 4-byte Reload
# xmm5 = mem[0],zero,zero,zero
movss 128(%rsp), %xmm6 # 4-byte Reload
# xmm6 = mem[0],zero,zero,zero
movss 120(%rsp), %xmm7 # 4-byte Reload
# xmm7 = mem[0],zero,zero,zero
callq _Z22__device_stub__computefiffffffffffffffffffff
.LBB2_2:
callq hipDeviceSynchronize
xorl %eax, %eax
addq $280, %rsp # imm = 0x118
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z7computefiffffffffffffffffffff, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z7computefiffffffffffffffffffff,@object # @_Z7computefiffffffffffffffffffff
.section .rodata,"a",@progbits
.globl _Z7computefiffffffffffffffffffff
.p2align 3, 0x0
_Z7computefiffffffffffffffffffff:
.quad _Z22__device_stub__computefiffffffffffffffffffff
.size _Z7computefiffffffffffffffffffff, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z7computefiffffffffffffffffffff"
.size .L__unnamed_1, 33
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z22__device_stub__computefiffffffffffffffffffff
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z7computefiffffffffffffffffffff
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | # include <stdio.h>
__device__ float doTheCalculation(float f) {
return f * f * f;
}
__global__ void cube(float *d_in, float *d_out) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
float f = d_in[idx];
//d_out[idx] = f * f *f;
d_out[idx] = doTheCalculation(f);
}
int main() {
const int ARRAY_SIZE = 4000;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
float h_in[ARRAY_SIZE];
for(int i=0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare memory pointers
float * d_in;
float * d_out;
// allocating memory for GPU variables
cudaMalloc((void **) &d_in, ARRAY_BYTES);
cudaMalloc((void **) &d_out, ARRAY_BYTES);
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
cube<<<4, 1000>>>(d_in, d_out);
cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
for(int i=0; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i%4) != 3) ? "\t" : "\n");
}
cudaFree(d_in);
cudaFree(d_out);
return 0;
} | code for sm_80
Function : _Z4cubePfS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_TID.X ; /* 0x0000000000047919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e240000002500 */
/*0050*/ IMAD R4, R3, c[0x0][0x0], R4 ; /* 0x0000000003047a24 */
/* 0x001fca00078e0204 */
/*0060*/ IMAD.WIDE R2, R4, R5, c[0x0][0x160] ; /* 0x0000580004027625 */
/* 0x000fcc00078e0205 */
/*0070*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IMAD.WIDE R4, R4, R5, c[0x0][0x168] ; /* 0x00005a0004047625 */
/* 0x000fc800078e0205 */
/*0090*/ FMUL R7, R2, R2 ; /* 0x0000000202077220 */
/* 0x004fc80000400000 */
/*00a0*/ FMUL R7, R2, R7 ; /* 0x0000000702077220 */
/* 0x000fca0000400000 */
/*00b0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*00c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00d0*/ BRA 0xd0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | # include <stdio.h>
__device__ float doTheCalculation(float f) {
return f * f * f;
}
__global__ void cube(float *d_in, float *d_out) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
float f = d_in[idx];
//d_out[idx] = f * f *f;
d_out[idx] = doTheCalculation(f);
}
int main() {
const int ARRAY_SIZE = 4000;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
float h_in[ARRAY_SIZE];
for(int i=0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare memory pointers
float * d_in;
float * d_out;
// allocating memory for GPU variables
cudaMalloc((void **) &d_in, ARRAY_BYTES);
cudaMalloc((void **) &d_out, ARRAY_BYTES);
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
cube<<<4, 1000>>>(d_in, d_out);
cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
for(int i=0; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i%4) != 3) ? "\t" : "\n");
}
cudaFree(d_in);
cudaFree(d_out);
return 0;
} | .file "tmpxft_000aa0f6_00000000-6_cube.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z16doTheCalculationf
.type _Z16doTheCalculationf, @function
_Z16doTheCalculationf:
.LFB2057:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2057:
.size _Z16doTheCalculationf, .-_Z16doTheCalculationf
.globl _Z25__device_stub__Z4cubePfS_PfS_
.type _Z25__device_stub__Z4cubePfS_PfS_, @function
_Z25__device_stub__Z4cubePfS_PfS_:
.LFB2083:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L9
.L5:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L10
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L9:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z4cubePfS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L5
.L10:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z25__device_stub__Z4cubePfS_PfS_, .-_Z25__device_stub__Z4cubePfS_PfS_
.globl _Z4cubePfS_
.type _Z4cubePfS_, @function
_Z4cubePfS_:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z25__device_stub__Z4cubePfS_PfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z4cubePfS_, .-_Z4cubePfS_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "\t"
.LC1:
.string "\n"
.LC2:
.string "%f"
.text
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
leaq -28672(%rsp), %r11
.cfi_def_cfa 11, 28720
.LPSRL0:
subq $4096, %rsp
orq $0, (%rsp)
cmpq %r11, %rsp
jne .LPSRL0
.cfi_def_cfa_register 7
subq $3392, %rsp
.cfi_def_cfa_offset 32112
movq %fs:40, %rax
movq %rax, 32056(%rsp)
xorl %eax, %eax
.L14:
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, 48(%rsp,%rax,4)
addq $1, %rax
cmpq $4000, %rax
jne .L14
leaq 8(%rsp), %rdi
movl $16000, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $16000, %esi
call cudaMalloc@PLT
leaq 48(%rsp), %rsi
movl $1, %ecx
movl $16000, %edx
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1000, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $4, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L23
.L15:
leaq 16048(%rsp), %rdi
movl $2, %ecx
movl $16000, %edx
movq 16(%rsp), %rsi
call cudaMemcpy@PLT
movl $0, %ebx
leaq 16048(%rsp), %r14
leaq .LC2(%rip), %r13
leaq .LC1(%rip), %r12
leaq .LC0(%rip), %rbp
.L17:
pxor %xmm0, %xmm0
cvtss2sd (%r14,%rbx,4), %xmm0
movq %r13, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl %ebx, %edx
sarl $31, %edx
shrl $30, %edx
leal (%rdx,%rbx), %eax
andl $3, %eax
subl %edx, %eax
cmpl $3, %eax
movq %rbp, %rsi
cmove %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $1, %rbx
cmpq $4000, %rbx
jne .L17
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 32056(%rsp), %rax
subq %fs:40, %rax
jne .L24
movl $0, %eax
addq $32064, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z25__device_stub__Z4cubePfS_PfS_
jmp .L15
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size main, .-main
.section .rodata.str1.1
.LC3:
.string "_Z4cubePfS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z4cubePfS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | # include <stdio.h>
__device__ float doTheCalculation(float f) {
return f * f * f;
}
__global__ void cube(float *d_in, float *d_out) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
float f = d_in[idx];
//d_out[idx] = f * f *f;
d_out[idx] = doTheCalculation(f);
}
int main() {
const int ARRAY_SIZE = 4000;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
float h_in[ARRAY_SIZE];
for(int i=0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare memory pointers
float * d_in;
float * d_out;
// allocating memory for GPU variables
cudaMalloc((void **) &d_in, ARRAY_BYTES);
cudaMalloc((void **) &d_out, ARRAY_BYTES);
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
cube<<<4, 1000>>>(d_in, d_out);
cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
for(int i=0; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i%4) != 3) ? "\t" : "\n");
}
cudaFree(d_in);
cudaFree(d_out);
return 0;
} | #include <hip/hip_runtime.h>
# include <stdio.h>
__device__ float doTheCalculation(float f) {
return f * f * f;
}
__global__ void cube(float *d_in, float *d_out) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
float f = d_in[idx];
//d_out[idx] = f * f *f;
d_out[idx] = doTheCalculation(f);
}
int main() {
const int ARRAY_SIZE = 4000;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
float h_in[ARRAY_SIZE];
for(int i=0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare memory pointers
float * d_in;
float * d_out;
// allocating memory for GPU variables
hipMalloc((void **) &d_in, ARRAY_BYTES);
hipMalloc((void **) &d_out, ARRAY_BYTES);
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
cube<<<4, 1000>>>(d_in, d_out);
hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);
for(int i=0; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i%4) != 3) ? "\t" : "\n");
}
hipFree(d_in);
hipFree(d_out);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
# include <stdio.h>
__device__ float doTheCalculation(float f) {
return f * f * f;
}
__global__ void cube(float *d_in, float *d_out) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
float f = d_in[idx];
//d_out[idx] = f * f *f;
d_out[idx] = doTheCalculation(f);
}
int main() {
const int ARRAY_SIZE = 4000;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
float h_in[ARRAY_SIZE];
for(int i=0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare memory pointers
float * d_in;
float * d_out;
// allocating memory for GPU variables
hipMalloc((void **) &d_in, ARRAY_BYTES);
hipMalloc((void **) &d_out, ARRAY_BYTES);
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
cube<<<4, 1000>>>(d_in, d_out);
hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);
for(int i=0; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i%4) != 3) ? "\t" : "\n");
}
hipFree(d_in);
hipFree(d_out);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z4cubePfS_
.globl _Z4cubePfS_
.p2align 8
.type _Z4cubePfS_,@function
_Z4cubePfS_:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x1c
s_load_b128 s[0:3], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
v_add_co_u32 v2, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
global_load_b32 v2, v[2:3], off
s_waitcnt vmcnt(0)
v_mul_f32_e32 v3, v2, v2
s_delay_alu instid0(VALU_DEP_1)
v_mul_f32_e32 v2, v2, v3
global_store_b32 v[0:1], v2, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z4cubePfS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z4cubePfS_, .Lfunc_end0-_Z4cubePfS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z4cubePfS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z4cubePfS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
# include <stdio.h>
__device__ float doTheCalculation(float f) {
return f * f * f;
}
__global__ void cube(float *d_in, float *d_out) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
float f = d_in[idx];
//d_out[idx] = f * f *f;
d_out[idx] = doTheCalculation(f);
}
int main() {
const int ARRAY_SIZE = 4000;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
float h_in[ARRAY_SIZE];
for(int i=0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare memory pointers
float * d_in;
float * d_out;
// allocating memory for GPU variables
hipMalloc((void **) &d_in, ARRAY_BYTES);
hipMalloc((void **) &d_out, ARRAY_BYTES);
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
cube<<<4, 1000>>>(d_in, d_out);
hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);
for(int i=0; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i%4) != 3) ? "\t" : "\n");
}
hipFree(d_in);
hipFree(d_out);
return 0;
} | .text
.file "cube.hip"
.globl _Z19__device_stub__cubePfS_ # -- Begin function _Z19__device_stub__cubePfS_
.p2align 4, 0x90
.type _Z19__device_stub__cubePfS_,@function
_Z19__device_stub__cubePfS_: # @_Z19__device_stub__cubePfS_
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z4cubePfS_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z19__device_stub__cubePfS_, .Lfunc_end0-_Z19__device_stub__cubePfS_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $32088, %rsp # imm = 0x7D58
.cfi_def_cfa_offset 32112
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, 16080(%rsp,%rax,4)
incq %rax
cmpq $4000, %rax # imm = 0xFA0
jne .LBB1_1
# %bb.2:
leaq 8(%rsp), %rdi
movl $16000, %esi # imm = 0x3E80
callq hipMalloc
movq %rsp, %rdi
movl $16000, %esi # imm = 0x3E80
callq hipMalloc
movq 8(%rsp), %rdi
leaq 16080(%rsp), %rsi
movl $16000, %edx # imm = 0x3E80
movl $1, %ecx
callq hipMemcpy
movabsq $4294967300, %rdi # imm = 0x100000004
leaq 996(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 8(%rsp), %rax
movq (%rsp), %rcx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z4cubePfS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
movq (%rsp), %rsi
leaq 80(%rsp), %rdi
movl $16000, %edx # imm = 0x3E80
movl $2, %ecx
callq hipMemcpy
movl $.L.str.2, %ebx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB1_5: # =>This Inner Loop Header: Depth=1
movss 80(%rsp,%r14,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %edi
movb $1, %al
callq printf
movl %r14d, %eax
notl %eax
testb $3, %al
movl $.L.str.1, %edi
cmoveq %rbx, %rdi
xorl %eax, %eax
callq printf
incq %r14
cmpq $4000, %r14 # imm = 0xFA0
jne .LBB1_5
# %bb.6:
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $32088, %rsp # imm = 0x7D58
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4cubePfS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z4cubePfS_,@object # @_Z4cubePfS_
.section .rodata,"a",@progbits
.globl _Z4cubePfS_
.p2align 3, 0x0
_Z4cubePfS_:
.quad _Z19__device_stub__cubePfS_
.size _Z4cubePfS_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%f"
.size .L.str, 3
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "\t"
.size .L.str.1, 2
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "\n"
.size .L.str.2, 2
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z4cubePfS_"
.size .L__unnamed_1, 12
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z19__device_stub__cubePfS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z4cubePfS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z4cubePfS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_TID.X ; /* 0x0000000000047919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e240000002500 */
/*0050*/ IMAD R4, R3, c[0x0][0x0], R4 ; /* 0x0000000003047a24 */
/* 0x001fca00078e0204 */
/*0060*/ IMAD.WIDE R2, R4, R5, c[0x0][0x160] ; /* 0x0000580004027625 */
/* 0x000fcc00078e0205 */
/*0070*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IMAD.WIDE R4, R4, R5, c[0x0][0x168] ; /* 0x00005a0004047625 */
/* 0x000fc800078e0205 */
/*0090*/ FMUL R7, R2, R2 ; /* 0x0000000202077220 */
/* 0x004fc80000400000 */
/*00a0*/ FMUL R7, R2, R7 ; /* 0x0000000702077220 */
/* 0x000fca0000400000 */
/*00b0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*00c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00d0*/ BRA 0xd0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z4cubePfS_
.globl _Z4cubePfS_
.p2align 8
.type _Z4cubePfS_,@function
_Z4cubePfS_:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x1c
s_load_b128 s[0:3], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
v_add_co_u32 v2, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
global_load_b32 v2, v[2:3], off
s_waitcnt vmcnt(0)
v_mul_f32_e32 v3, v2, v2
s_delay_alu instid0(VALU_DEP_1)
v_mul_f32_e32 v2, v2, v3
global_store_b32 v[0:1], v2, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z4cubePfS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z4cubePfS_, .Lfunc_end0-_Z4cubePfS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z4cubePfS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z4cubePfS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000aa0f6_00000000-6_cube.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z16doTheCalculationf
.type _Z16doTheCalculationf, @function
_Z16doTheCalculationf:
.LFB2057:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2057:
.size _Z16doTheCalculationf, .-_Z16doTheCalculationf
.globl _Z25__device_stub__Z4cubePfS_PfS_
.type _Z25__device_stub__Z4cubePfS_PfS_, @function
_Z25__device_stub__Z4cubePfS_PfS_:
.LFB2083:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L9
.L5:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L10
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L9:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z4cubePfS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L5
.L10:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z25__device_stub__Z4cubePfS_PfS_, .-_Z25__device_stub__Z4cubePfS_PfS_
.globl _Z4cubePfS_
.type _Z4cubePfS_, @function
_Z4cubePfS_:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z25__device_stub__Z4cubePfS_PfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z4cubePfS_, .-_Z4cubePfS_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "\t"
.LC1:
.string "\n"
.LC2:
.string "%f"
.text
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
leaq -28672(%rsp), %r11
.cfi_def_cfa 11, 28720
.LPSRL0:
subq $4096, %rsp
orq $0, (%rsp)
cmpq %r11, %rsp
jne .LPSRL0
.cfi_def_cfa_register 7
subq $3392, %rsp
.cfi_def_cfa_offset 32112
movq %fs:40, %rax
movq %rax, 32056(%rsp)
xorl %eax, %eax
.L14:
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, 48(%rsp,%rax,4)
addq $1, %rax
cmpq $4000, %rax
jne .L14
leaq 8(%rsp), %rdi
movl $16000, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $16000, %esi
call cudaMalloc@PLT
leaq 48(%rsp), %rsi
movl $1, %ecx
movl $16000, %edx
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1000, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $4, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L23
.L15:
leaq 16048(%rsp), %rdi
movl $2, %ecx
movl $16000, %edx
movq 16(%rsp), %rsi
call cudaMemcpy@PLT
movl $0, %ebx
leaq 16048(%rsp), %r14
leaq .LC2(%rip), %r13
leaq .LC1(%rip), %r12
leaq .LC0(%rip), %rbp
.L17:
pxor %xmm0, %xmm0
cvtss2sd (%r14,%rbx,4), %xmm0
movq %r13, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl %ebx, %edx
sarl $31, %edx
shrl $30, %edx
leal (%rdx,%rbx), %eax
andl $3, %eax
subl %edx, %eax
cmpl $3, %eax
movq %rbp, %rsi
cmove %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $1, %rbx
cmpq $4000, %rbx
jne .L17
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 32056(%rsp), %rax
subq %fs:40, %rax
jne .L24
movl $0, %eax
addq $32064, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z25__device_stub__Z4cubePfS_PfS_
jmp .L15
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size main, .-main
.section .rodata.str1.1
.LC3:
.string "_Z4cubePfS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z4cubePfS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "cube.hip"
.globl _Z19__device_stub__cubePfS_ # -- Begin function _Z19__device_stub__cubePfS_
.p2align 4, 0x90
.type _Z19__device_stub__cubePfS_,@function
_Z19__device_stub__cubePfS_: # @_Z19__device_stub__cubePfS_
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z4cubePfS_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z19__device_stub__cubePfS_, .Lfunc_end0-_Z19__device_stub__cubePfS_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $32088, %rsp # imm = 0x7D58
.cfi_def_cfa_offset 32112
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, 16080(%rsp,%rax,4)
incq %rax
cmpq $4000, %rax # imm = 0xFA0
jne .LBB1_1
# %bb.2:
leaq 8(%rsp), %rdi
movl $16000, %esi # imm = 0x3E80
callq hipMalloc
movq %rsp, %rdi
movl $16000, %esi # imm = 0x3E80
callq hipMalloc
movq 8(%rsp), %rdi
leaq 16080(%rsp), %rsi
movl $16000, %edx # imm = 0x3E80
movl $1, %ecx
callq hipMemcpy
movabsq $4294967300, %rdi # imm = 0x100000004
leaq 996(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 8(%rsp), %rax
movq (%rsp), %rcx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z4cubePfS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
movq (%rsp), %rsi
leaq 80(%rsp), %rdi
movl $16000, %edx # imm = 0x3E80
movl $2, %ecx
callq hipMemcpy
movl $.L.str.2, %ebx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB1_5: # =>This Inner Loop Header: Depth=1
movss 80(%rsp,%r14,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %edi
movb $1, %al
callq printf
movl %r14d, %eax
notl %eax
testb $3, %al
movl $.L.str.1, %edi
cmoveq %rbx, %rdi
xorl %eax, %eax
callq printf
incq %r14
cmpq $4000, %r14 # imm = 0xFA0
jne .LBB1_5
# %bb.6:
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $32088, %rsp # imm = 0x7D58
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4cubePfS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z4cubePfS_,@object # @_Z4cubePfS_
.section .rodata,"a",@progbits
.globl _Z4cubePfS_
.p2align 3, 0x0
_Z4cubePfS_:
.quad _Z19__device_stub__cubePfS_
.size _Z4cubePfS_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%f"
.size .L.str, 3
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "\t"
.size .L.str.1, 2
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "\n"
.size .L.str.2, 2
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z4cubePfS_"
.size .L__unnamed_1, 12
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z19__device_stub__cubePfS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z4cubePfS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /*
*Derek Trom
*HW5 CSCI364
*/
#include <stdlib.h>
#include <iostream>
#include <math.h>
#include <iomanip>
#include <cstdio>
__device__ float add(float num){
float outnum = num + 1;
return outnum;
}
__global__
void func1(float *xd, float *yd, int n) {
int threadId = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = threadId; i < n; i+= stride) {
yd[i] = add(xd[i]);
}
}
__global__
void createArrays(float *in, float *out, int n){
int threadId = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = threadId; i < n; i+=stride) {
in[i] = 1.0f;
out[i] = 0.0f;
}
}
// Driver: parses <array length> and <threads per block> from argv, fills two
// managed arrays on the GPU (xd = 1.0f, yd = 0.0f), runs func1 (yd = xd + 1),
// and reports the maximum deviation of yd[i] from the expected 2.0f.
// Phase 2 repeats the run with a smaller launch configuration.
// NOTE(review): none of the CUDA API / kernel-launch calls are error-checked.
int main(int argc, char **argv){
    using namespace std;
    if( argc< 3){
        cerr<<"Usage: "<<argv[0]<<" <length of arrays> <num threads/block>"<<endl;
        return 1;
    }
    int threads_per_block = atoi(argv[2]);
    int sizeOfArray = atoi(argv[1]);
    if (sizeOfArray < 1 or threads_per_block < 1){
        cerr<<"Array length and block size must be > 0"<<endl;
        return 1;
    }
    float *xd, *yd;
    // Unified (managed) memory: the same pointers are used on device and host.
    cudaMallocManaged(&xd, sizeOfArray*sizeof(float));
    cudaMallocManaged(&yd, sizeOfArray*sizeof(float));
    //---------PHASE ONE----------//
    // Ceiling division: enough blocks to give one thread per element.
    int numBlocks = (sizeOfArray + threads_per_block- 1) / threads_per_block;
    createArrays<<<numBlocks, threads_per_block>>>(xd, yd, sizeOfArray);
    func1<<<numBlocks, threads_per_block>>>(xd,yd,sizeOfArray);
    cudaDeviceSynchronize();   // kernels are async; sync before host reads yd
    float maxError = 0.0f;
    // xd[i] = 1.0f and func1 adds 1, so every yd[i] should equal 2.0f.
    for (int i = 0; i < sizeOfArray; i++)
    {
        maxError = fmax(maxError, fabs(yd[i]-2.0f));
    }
    cout<<"Phase 1"<<endl;
    cout<<endl<<"Array size: "<<sizeOfArray<<endl;
    cout<<"Threads per block: "<<threads_per_block<<endl;
    cout<<"Number of blocks: "<<numBlocks<<endl;
    cout << "Max error: " << maxError << endl;
    //--------Phase 2-------//
    //Use half the number of blocks to get the next number but use
    //the same kernel function
    // NOTE(review): the comment above says "half the number of blocks", but the
    // code halves threads_per_block and reuses the old numBlocks; the kernels'
    // grid-stride loops still cover the whole array either way. Also note
    // threads_per_block/2 becomes 0 when the input was 1 — an invalid launch.
    threads_per_block = threads_per_block/2;
    // NOTE(review): maxError is not reset here, so Phase 2 reports the max over
    // both phases — confirm whether that carry-over is intended.
    createArrays<<<numBlocks, threads_per_block>>>(xd, yd, sizeOfArray);
    func1<<<numBlocks, threads_per_block>>>(xd,yd,sizeOfArray);
    cudaDeviceSynchronize();
    for (int i = 0; i < sizeOfArray; i++)
    {
        maxError = fmax(maxError, fabs(yd[i]-2.0f));
    }
    cout<<"Phase 2"<<endl;
    cout<<endl<<"Array size: "<<sizeOfArray<<endl;
    cout<<"Threads per block: "<<threads_per_block<<endl;
    cout<<"Number of blocks: "<<numBlocks<<endl;
    cout << "Max error: " << maxError << endl;
    cudaFree(xd);
    cudaFree(yd);
    return 0;
}
Function : _Z12createArraysPfS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e280000002500 */
/*0020*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R3, R3, c[0x0][0x0], R0 ; /* 0x0000000003037a24 */
/* 0x001fca00078e0200 */
/*0040*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x170], PT ; /* 0x00005c0003007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ MOV R0, c[0x0][0x0] ; /* 0x0000000000007a02 */
/* 0x000fe20000000f00 */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0080*/ BSSY B0, 0x310 ; /* 0x0000028000007945 */
/* 0x000fe60003800000 */
/*0090*/ IMAD R0, R0, c[0x0][0xc], RZ ; /* 0x0000030000007a24 */
/* 0x000fc800078e02ff */
/*00a0*/ I2F.U32.RP R6, R0 ; /* 0x0000000000067306 */
/* 0x000e220000209000 */
/*00b0*/ IMAD.MOV R9, RZ, RZ, -R0 ; /* 0x000000ffff097224 */
/* 0x000fe200078e0a00 */
/*00c0*/ IADD3 R2, R0.reuse, R3, RZ ; /* 0x0000000300027210 */
/* 0x040fe40007ffe0ff */
/*00d0*/ ISETP.NE.U32.AND P2, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fe40003f45070 */
/*00e0*/ LOP3.LUT R7, RZ, R2, RZ, 0x33, !PT ; /* 0x00000002ff077212 */
/* 0x000fc800078e33ff */
/*00f0*/ IADD3 R7, R7, c[0x0][0x170], R0 ; /* 0x00005c0007077a10 */
/* 0x000fe20007ffe000 */
/*0100*/ MUFU.RCP R6, R6 ; /* 0x0000000600067308 */
/* 0x001e240000001000 */
/*0110*/ IADD3 R4, R6, 0xffffffe, RZ ; /* 0x0ffffffe06047810 */
/* 0x001fcc0007ffe0ff */
/*0120*/ F2I.FTZ.U32.TRUNC.NTZ R5, R4 ; /* 0x0000000400057305 */
/* 0x000064000021f000 */
/*0130*/ IMAD.MOV.U32 R4, RZ, RZ, RZ ; /* 0x000000ffff047224 */
/* 0x001fe400078e00ff */
/*0140*/ IMAD R9, R9, R5, RZ ; /* 0x0000000509097224 */
/* 0x002fc800078e02ff */
/*0150*/ IMAD.HI.U32 R2, R5, R9, R4 ; /* 0x0000000905027227 */
/* 0x000fcc00078e0004 */
/*0160*/ IMAD.HI.U32 R2, R2, R7, RZ ; /* 0x0000000702027227 */
/* 0x000fca00078e00ff */
/*0170*/ IADD3 R4, -R2, RZ, RZ ; /* 0x000000ff02047210 */
/* 0x000fca0007ffe1ff */
/*0180*/ IMAD R7, R0, R4, R7 ; /* 0x0000000400077224 */
/* 0x000fca00078e0207 */
/*0190*/ ISETP.GE.U32.AND P0, PT, R7, R0, PT ; /* 0x000000000700720c */
/* 0x000fda0003f06070 */
/*01a0*/ @P0 IMAD.IADD R7, R7, 0x1, -R0 ; /* 0x0000000107070824 */
/* 0x000fe200078e0a00 */
/*01b0*/ @P0 IADD3 R2, R2, 0x1, RZ ; /* 0x0000000102020810 */
/* 0x000fc80007ffe0ff */
/*01c0*/ ISETP.GE.U32.AND P1, PT, R7, R0, PT ; /* 0x000000000700720c */
/* 0x000fda0003f26070 */
/*01d0*/ @P1 IADD3 R2, R2, 0x1, RZ ; /* 0x0000000102021810 */
/* 0x000fe40007ffe0ff */
/*01e0*/ @!P2 LOP3.LUT R2, RZ, R0, RZ, 0x33, !PT ; /* 0x00000000ff02a212 */
/* 0x000fc800078e33ff */
/*01f0*/ IADD3 R4, R2.reuse, 0x1, RZ ; /* 0x0000000102047810 */
/* 0x040fe40007ffe0ff */
/*0200*/ ISETP.GE.U32.AND P1, PT, R2, 0x3, PT ; /* 0x000000030200780c */
/* 0x000fe40003f26070 */
/*0210*/ LOP3.LUT P0, R4, R4, 0x3, RZ, 0xc0, !PT ; /* 0x0000000304047812 */
/* 0x000fda000780c0ff */
/*0220*/ @!P0 BRA 0x300 ; /* 0x000000d000008947 */
/* 0x000fea0003800000 */
/*0230*/ HFMA2.MMA R6, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff067435 */
/* 0x000fe200000001ff */
/*0240*/ IMAD.MOV.U32 R2, RZ, RZ, R4 ; /* 0x000000ffff027224 */
/* 0x000fe200078e0004 */
/*0250*/ MOV R9, 0x3f800000 ; /* 0x3f80000000097802 */
/* 0x000fd00000000f00 */
/*0260*/ IMAD.WIDE R4, R3, R6, c[0x0][0x168] ; /* 0x00005a0003047625 */
/* 0x000fc800078e0206 */
/*0270*/ IMAD.WIDE R6, R3, R6, c[0x0][0x160] ; /* 0x0000580003067625 */
/* 0x000fc800078e0206 */
/*0280*/ IADD3 R2, R2, -0x1, RZ ; /* 0xffffffff02027810 */
/* 0x000fe20007ffe0ff */
/*0290*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x0001e2000c101904 */
/*02a0*/ IMAD.IADD R3, R0, 0x1, R3 ; /* 0x0000000100037824 */
/* 0x000fe400078e0203 */
/*02b0*/ ISETP.NE.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x000fe20003f05270 */
/*02c0*/ STG.E [R4.64], RZ ; /* 0x000000ff04007986 */
/* 0x0003e2000c101904 */
/*02d0*/ IMAD.WIDE R6, R0, 0x4, R6 ; /* 0x0000000400067825 */
/* 0x001fc800078e0206 */
/*02e0*/ IMAD.WIDE R4, R0, 0x4, R4 ; /* 0x0000000400047825 */
/* 0x002fce00078e0204 */
/*02f0*/ @P0 BRA 0x280 ; /* 0xffffff8000000947 */
/* 0x000fea000383ffff */
/*0300*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0310*/ @!P1 EXIT ; /* 0x000000000000994d */
/* 0x000fea0003800000 */
/*0320*/ HFMA2.MMA R2, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff027435 */
/* 0x000fe200000001ff */
/*0330*/ IMAD.MOV.U32 R21, RZ, RZ, 0x3f800000 ; /* 0x3f800000ff157424 */
/* 0x000fd200078e00ff */
/*0340*/ IMAD.WIDE R4, R3, R2, c[0x0][0x160] ; /* 0x0000580003047625 */
/* 0x001fc800078e0202 */
/*0350*/ IMAD.WIDE R6, R3, R2, c[0x0][0x168] ; /* 0x00005a0003067625 */
/* 0x000fe200078e0202 */
/*0360*/ STG.E [R4.64], R21 ; /* 0x0000001504007986 */
/* 0x0001e2000c101904 */
/*0370*/ IADD3 R3, R0.reuse, R3, R0 ; /* 0x0000000300037210 */
/* 0x040fe40007ffe000 */
/*0380*/ IMAD.WIDE R8, R0.reuse, 0x4, R4 ; /* 0x0000000400087825 */
/* 0x040fe200078e0204 */
/*0390*/ STG.E [R6.64], RZ ; /* 0x000000ff06007986 */
/* 0x0001e2000c101904 */
/*03a0*/ IADD3 R3, R0.reuse, R3, R0 ; /* 0x0000000300037210 */
/* 0x040fe40007ffe000 */
/*03b0*/ IMAD.WIDE R10, R0, 0x4, R6 ; /* 0x00000004000a7825 */
/* 0x000fe200078e0206 */
/*03c0*/ STG.E [R8.64], R21 ; /* 0x0000001508007986 */
/* 0x0001e2000c101904 */
/*03d0*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x170], PT ; /* 0x00005c0003007a0c */
/* 0x000fc40003f06270 */
/*03e0*/ IMAD.WIDE R12, R0.reuse, 0x4, R8 ; /* 0x00000004000c7825 */
/* 0x040fe200078e0208 */
/*03f0*/ STG.E [R10.64], RZ ; /* 0x000000ff0a007986 */
/* 0x0001e6000c101904 */
/*0400*/ IMAD.WIDE R14, R0.reuse, 0x4, R10 ; /* 0x00000004000e7825 */
/* 0x040fe200078e020a */
/*0410*/ STG.E [R12.64], R21 ; /* 0x000000150c007986 */
/* 0x0001e6000c101904 */
/*0420*/ IMAD.WIDE R16, R0.reuse, 0x4, R12 ; /* 0x0000000400107825 */
/* 0x040fe200078e020c */
/*0430*/ STG.E [R14.64], RZ ; /* 0x000000ff0e007986 */
/* 0x0001e6000c101904 */
/*0440*/ IMAD.WIDE R18, R0, 0x4, R14 ; /* 0x0000000400127825 */
/* 0x000fe200078e020e */
/*0450*/ STG.E [R16.64], R21 ; /* 0x0000001510007986 */
/* 0x0001e8000c101904 */
/*0460*/ STG.E [R18.64], RZ ; /* 0x000000ff12007986 */
/* 0x0001e2000c101904 */
/*0470*/ @!P0 BRA 0x340 ; /* 0xfffffec000008947 */
/* 0x000fea000383ffff */
/*0480*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0490*/ BRA 0x490; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*04a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0500*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0510*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0520*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0530*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0540*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0550*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0560*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0570*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z5func1PfS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e280000002500 */
/*0020*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R3, R3, c[0x0][0x0], R0 ; /* 0x0000000003037a24 */
/* 0x001fca00078e0200 */
/*0040*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x170], PT ; /* 0x00005c0003007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ MOV R0, c[0x0][0x0] ; /* 0x0000000000007a02 */
/* 0x000fe20000000f00 */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0080*/ BSSY B0, 0x310 ; /* 0x0000028000007945 */
/* 0x000fe60003800000 */
/*0090*/ IMAD R0, R0, c[0x0][0xc], RZ ; /* 0x0000030000007a24 */
/* 0x000fc800078e02ff */
/*00a0*/ I2F.U32.RP R6, R0 ; /* 0x0000000000067306 */
/* 0x000e220000209000 */
/*00b0*/ IMAD.MOV R9, RZ, RZ, -R0 ; /* 0x000000ffff097224 */
/* 0x000fe200078e0a00 */
/*00c0*/ IADD3 R2, R0.reuse, R3, RZ ; /* 0x0000000300027210 */
/* 0x040fe40007ffe0ff */
/*00d0*/ ISETP.NE.U32.AND P2, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fe40003f45070 */
/*00e0*/ LOP3.LUT R7, RZ, R2, RZ, 0x33, !PT ; /* 0x00000002ff077212 */
/* 0x000fc800078e33ff */
/*00f0*/ IADD3 R7, R7, c[0x0][0x170], R0 ; /* 0x00005c0007077a10 */
/* 0x000fe20007ffe000 */
/*0100*/ MUFU.RCP R6, R6 ; /* 0x0000000600067308 */
/* 0x001e240000001000 */
/*0110*/ IADD3 R4, R6, 0xffffffe, RZ ; /* 0x0ffffffe06047810 */
/* 0x001fcc0007ffe0ff */
/*0120*/ F2I.FTZ.U32.TRUNC.NTZ R5, R4 ; /* 0x0000000400057305 */
/* 0x000064000021f000 */
/*0130*/ HFMA2.MMA R4, -RZ, RZ, 0, 0 ; /* 0x00000000ff047435 */
/* 0x001fe200000001ff */
/*0140*/ IMAD R9, R9, R5, RZ ; /* 0x0000000509097224 */
/* 0x002fd200078e02ff */
/*0150*/ IMAD.HI.U32 R2, R5, R9, R4 ; /* 0x0000000905027227 */
/* 0x000fcc00078e0004 */
/*0160*/ IMAD.HI.U32 R2, R2, R7, RZ ; /* 0x0000000702027227 */
/* 0x000fc800078e00ff */
/*0170*/ IMAD.MOV R4, RZ, RZ, -R2 ; /* 0x000000ffff047224 */
/* 0x000fc800078e0a02 */
/*0180*/ IMAD R7, R0, R4, R7 ; /* 0x0000000400077224 */
/* 0x000fca00078e0207 */
/*0190*/ ISETP.GE.U32.AND P0, PT, R7, R0, PT ; /* 0x000000000700720c */
/* 0x000fda0003f06070 */
/*01a0*/ @P0 IADD3 R7, -R0, R7, RZ ; /* 0x0000000700070210 */
/* 0x000fe40007ffe1ff */
/*01b0*/ @P0 IADD3 R2, R2, 0x1, RZ ; /* 0x0000000102020810 */
/* 0x000fe40007ffe0ff */
/*01c0*/ ISETP.GE.U32.AND P1, PT, R7, R0, PT ; /* 0x000000000700720c */
/* 0x000fda0003f26070 */
/*01d0*/ @P1 IADD3 R2, R2, 0x1, RZ ; /* 0x0000000102021810 */
/* 0x000fe40007ffe0ff */
/*01e0*/ @!P2 LOP3.LUT R2, RZ, R0, RZ, 0x33, !PT ; /* 0x00000000ff02a212 */
/* 0x000fc800078e33ff */
/*01f0*/ IADD3 R4, R2.reuse, 0x1, RZ ; /* 0x0000000102047810 */
/* 0x040fe40007ffe0ff */
/*0200*/ ISETP.GE.U32.AND P1, PT, R2, 0x3, PT ; /* 0x000000030200780c */
/* 0x000fe40003f26070 */
/*0210*/ LOP3.LUT P0, R4, R4, 0x3, RZ, 0xc0, !PT ; /* 0x0000000304047812 */
/* 0x000fda000780c0ff */
/*0220*/ @!P0 BRA 0x300 ; /* 0x000000d000008947 */
/* 0x000fea0003800000 */
/*0230*/ MOV R6, 0x4 ; /* 0x0000000400067802 */
/* 0x000fe20000000f00 */
/*0240*/ IMAD.MOV.U32 R2, RZ, RZ, R4 ; /* 0x000000ffff027224 */
/* 0x000fc800078e0004 */
/*0250*/ IMAD.WIDE R4, R3, R6, c[0x0][0x168] ; /* 0x00005a0003047625 */
/* 0x000fc800078e0206 */
/*0260*/ IMAD.WIDE R6, R3, R6, c[0x0][0x160] ; /* 0x0000580003067625 */
/* 0x000fca00078e0206 */
/*0270*/ LDG.E R8, [R6.64] ; /* 0x0000000406087981 */
/* 0x0000a2000c1e1900 */
/*0280*/ IADD3 R2, R2, -0x1, RZ ; /* 0xffffffff02027810 */
/* 0x000fe40007ffe0ff */
/*0290*/ IADD3 R3, R0, R3, RZ ; /* 0x0000000300037210 */
/* 0x000fe40007ffe0ff */
/*02a0*/ ISETP.NE.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x000fe20003f05270 */
/*02b0*/ IMAD.WIDE R6, R0, 0x4, R6 ; /* 0x0000000400067825 */
/* 0x001fc800078e0206 */
/*02c0*/ FADD R9, R8, 1 ; /* 0x3f80000008097421 */
/* 0x004fca0000000000 */
/*02d0*/ STG.E [R4.64], R9 ; /* 0x0000000904007986 */
/* 0x0001e4000c101904 */
/*02e0*/ IMAD.WIDE R4, R0, 0x4, R4 ; /* 0x0000000400047825 */
/* 0x001fe200078e0204 */
/*02f0*/ @P0 BRA 0x270 ; /* 0xffffff7000000947 */
/* 0x000fea000383ffff */
/*0300*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0310*/ @!P1 EXIT ; /* 0x000000000000994d */
/* 0x000fea0003800000 */
/*0320*/ HFMA2.MMA R6, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff067435 */
/* 0x002fd400000001ff */
/*0330*/ IMAD.WIDE R4, R3, R6, c[0x0][0x160] ; /* 0x0000580003047625 */
/* 0x000fca00078e0206 */
/*0340*/ LDG.E R2, [R4.64] ; /* 0x0000000404027981 */
/* 0x000ea2000c1e1900 */
/*0350*/ IMAD.WIDE R6, R3, R6, c[0x0][0x168] ; /* 0x00005a0003067625 */
/* 0x000fc800078e0206 */
/*0360*/ IMAD.WIDE R8, R0, 0x4, R4 ; /* 0x0000000400087825 */
/* 0x000fc800078e0204 */
/*0370*/ FADD R17, R2, 1 ; /* 0x3f80000002117421 */
/* 0x004fca0000000000 */
/*0380*/ STG.E [R6.64], R17 ; /* 0x0000001106007986 */
/* 0x0001e8000c101904 */
/*0390*/ LDG.E R2, [R8.64] ; /* 0x0000000408027981 */
/* 0x000ea2000c1e1900 */
/*03a0*/ IMAD.WIDE R10, R0, 0x4, R6 ; /* 0x00000004000a7825 */
/* 0x000fc800078e0206 */
/*03b0*/ IMAD.WIDE R12, R0, 0x4, R8 ; /* 0x00000004000c7825 */
/* 0x000fc800078e0208 */
/*03c0*/ FADD R19, R2, 1 ; /* 0x3f80000002137421 */
/* 0x004fca0000000000 */
/*03d0*/ STG.E [R10.64], R19 ; /* 0x000000130a007986 */
/* 0x0003e8000c101904 */
/*03e0*/ LDG.E R2, [R12.64] ; /* 0x000000040c027981 */
/* 0x000ea2000c1e1900 */
/*03f0*/ IMAD.WIDE R4, R0, 0x4, R10 ; /* 0x0000000400047825 */
/* 0x000fc800078e020a */
/*0400*/ IMAD.WIDE R14, R0, 0x4, R12 ; /* 0x00000004000e7825 */
/* 0x000fc800078e020c */
/*0410*/ FADD R21, R2, 1 ; /* 0x3f80000002157421 */
/* 0x004fca0000000000 */
/*0420*/ STG.E [R4.64], R21 ; /* 0x0000001504007986 */
/* 0x0003e8000c101904 */
/*0430*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000ea2000c1e1900 */
/*0440*/ IMAD.WIDE R6, R0.reuse, 0x4, R4 ; /* 0x0000000400067825 */
/* 0x041fe200078e0204 */
/*0450*/ IADD3 R3, R0, R3, R0 ; /* 0x0000000300037210 */
/* 0x000fc80007ffe000 */
/*0460*/ IADD3 R3, R0, R3, R0 ; /* 0x0000000300037210 */
/* 0x000fc80007ffe000 */
/*0470*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x170], PT ; /* 0x00005c0003007a0c */
/* 0x000fe20003f06270 */
/*0480*/ FADD R9, R14, 1 ; /* 0x3f8000000e097421 */
/* 0x004fca0000000000 */
/*0490*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x0003ee000c101904 */
/*04a0*/ @!P0 BRA 0x320 ; /* 0xfffffe7000008947 */
/* 0x000fea000383ffff */
/*04b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*04c0*/ BRA 0x4c0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*04d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0500*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0510*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0520*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0530*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0540*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0550*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0560*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0570*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /*
*Derek Trom
*HW5 CSCI364
*/
#include <stdlib.h>
#include <iostream>
#include <math.h>
#include <iomanip>
#include <cstdio>
// Device helper: returns its argument plus one.
// Note: the integer literal 1 is implicitly promoted to float.
__device__ float add(float num){
    float outnum = num + 1;
    return outnum;
}
// Kernel: yd[i] = add(xd[i]) (i.e. xd[i] + 1.0f) for every element.
// Grid-stride loop: correctness does not depend on the launch configuration.
__global__
void func1(float *xd, float *yd, int n) {
    int threadId = blockIdx.x * blockDim.x + threadIdx.x;   // global thread index
    int stride = blockDim.x * gridDim.x;                    // total threads in the grid
    for (int i = threadId; i < n; i+= stride) {
        yd[i] = add(xd[i]);
    }
}
// Kernel: initializes in[i] = 1.0f and out[i] = 0.0f for every element
// via a grid-stride loop.
__global__
void createArrays(float *in, float *out, int n){
    int threadId = blockIdx.x * blockDim.x + threadIdx.x;   // global thread index
    int stride = blockDim.x * gridDim.x;                    // total threads in the grid
    for (int i = threadId; i < n; i+=stride) {
        in[i] = 1.0f;
        out[i] = 0.0f;
    }
}
// Driver: parses <array length> and <threads per block> from argv, fills two
// managed arrays on the GPU (xd = 1.0f, yd = 0.0f), runs func1 (yd = xd + 1),
// and reports the maximum deviation of yd[i] from the expected 2.0f.
// Phase 2 repeats the run with a smaller launch configuration.
// NOTE(review): none of the CUDA API / kernel-launch calls are error-checked.
int main(int argc, char **argv){
    using namespace std;
    if( argc< 3){
        cerr<<"Usage: "<<argv[0]<<" <length of arrays> <num threads/block>"<<endl;
        return 1;
    }
    int threads_per_block = atoi(argv[2]);
    int sizeOfArray = atoi(argv[1]);
    if (sizeOfArray < 1 or threads_per_block < 1){
        cerr<<"Array length and block size must be > 0"<<endl;
        return 1;
    }
    float *xd, *yd;
    // Unified (managed) memory: the same pointers are used on device and host.
    cudaMallocManaged(&xd, sizeOfArray*sizeof(float));
    cudaMallocManaged(&yd, sizeOfArray*sizeof(float));
    //---------PHASE ONE----------//
    // Ceiling division: enough blocks to give one thread per element.
    int numBlocks = (sizeOfArray + threads_per_block- 1) / threads_per_block;
    createArrays<<<numBlocks, threads_per_block>>>(xd, yd, sizeOfArray);
    func1<<<numBlocks, threads_per_block>>>(xd,yd,sizeOfArray);
    cudaDeviceSynchronize();   // kernels are async; sync before host reads yd
    float maxError = 0.0f;
    // xd[i] = 1.0f and func1 adds 1, so every yd[i] should equal 2.0f.
    for (int i = 0; i < sizeOfArray; i++)
    {
        maxError = fmax(maxError, fabs(yd[i]-2.0f));
    }
    cout<<"Phase 1"<<endl;
    cout<<endl<<"Array size: "<<sizeOfArray<<endl;
    cout<<"Threads per block: "<<threads_per_block<<endl;
    cout<<"Number of blocks: "<<numBlocks<<endl;
    cout << "Max error: " << maxError << endl;
    //--------Phase 2-------//
    //Use half the number of blocks to get the next number but use
    //the same kernel function
    // NOTE(review): the comment above says "half the number of blocks", but the
    // code halves threads_per_block and reuses the old numBlocks; the kernels'
    // grid-stride loops still cover the whole array either way. Also note
    // threads_per_block/2 becomes 0 when the input was 1 — an invalid launch.
    threads_per_block = threads_per_block/2;
    // NOTE(review): maxError is not reset here, so Phase 2 reports the max over
    // both phases — confirm whether that carry-over is intended.
    createArrays<<<numBlocks, threads_per_block>>>(xd, yd, sizeOfArray);
    func1<<<numBlocks, threads_per_block>>>(xd,yd,sizeOfArray);
    cudaDeviceSynchronize();
    for (int i = 0; i < sizeOfArray; i++)
    {
        maxError = fmax(maxError, fabs(yd[i]-2.0f));
    }
    cout<<"Phase 2"<<endl;
    cout<<endl<<"Array size: "<<sizeOfArray<<endl;
    cout<<"Threads per block: "<<threads_per_block<<endl;
    cout<<"Number of blocks: "<<numBlocks<<endl;
    cout << "Max error: " << maxError << endl;
    cudaFree(xd);
    cudaFree(yd);
    return 0;
}
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3953:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3953:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z3addf
.type _Z3addf, @function
_Z3addf:
.LFB3949:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE3949:
.size _Z3addf, .-_Z3addf
.globl _Z27__device_stub__Z5func1PfS_iPfS_i
.type _Z27__device_stub__Z5func1PfS_iPfS_i, @function
_Z27__device_stub__Z5func1PfS_iPfS_i:
.LFB3975:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L9
.L5:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L10
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L9:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z5func1PfS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L5
.L10:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3975:
.size _Z27__device_stub__Z5func1PfS_iPfS_i, .-_Z27__device_stub__Z5func1PfS_iPfS_i
.globl _Z5func1PfS_i
.type _Z5func1PfS_i, @function
_Z5func1PfS_i:
.LFB3976:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z5func1PfS_iPfS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3976:
.size _Z5func1PfS_i, .-_Z5func1PfS_i
.globl _Z35__device_stub__Z12createArraysPfS_iPfS_i
.type _Z35__device_stub__Z12createArraysPfS_iPfS_i, @function
_Z35__device_stub__Z12createArraysPfS_iPfS_i:
.LFB3977:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L17
.L13:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L18
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L17:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z12createArraysPfS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L13
.L18:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3977:
.size _Z35__device_stub__Z12createArraysPfS_iPfS_i, .-_Z35__device_stub__Z12createArraysPfS_iPfS_i
.globl _Z12createArraysPfS_i
.type _Z12createArraysPfS_i, @function
_Z12createArraysPfS_i:
.LFB3978:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z12createArraysPfS_iPfS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3978:
.size _Z12createArraysPfS_i, .-_Z12createArraysPfS_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "Usage: "
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string " <length of arrays> <num threads/block>"
.align 8
.LC3:
.string "Array length and block size must be > 0"
.section .rodata.str1.1
.LC6:
.string "Phase 1"
.LC7:
.string "Array size: "
.LC8:
.string "Threads per block: "
.LC9:
.string "Number of blocks: "
.LC10:
.string "Max error: "
.LC11:
.string "Phase 2"
.text
.globl main
.type main, @function
main:
.LFB3950:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $64, %rsp
.cfi_def_cfa_offset 112
movq %rsi, %rbx
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
cmpl $2, %edi
jle .L37
movq 16(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %r14
movl %eax, %r12d
movq 8(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %rbx
movl %eax, %ebp
testl %eax, %eax
jle .L33
testl %r14d, %r14d
jle .L33
movslq %eax, %r13
salq $2, %r13
leaq 16(%rsp), %rdi
movl $1, %edx
movq %r13, %rsi
call cudaMallocManaged@PLT
leaq 24(%rsp), %rdi
movl $1, %edx
movq %r13, %rsi
call cudaMallocManaged@PLT
leal -1(%r14,%rbx), %eax
cltd
idivl %r14d
movl %eax, %r13d
movl %r14d, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl %eax, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L38
.L26:
movl %r14d, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl %r13d, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L39
.L27:
call cudaDeviceSynchronize@PLT
movq 24(%rsp), %r14
movl $0, %ebx
movl $0x00000000, 12(%rsp)
.L28:
movss (%r14,%rbx,4), %xmm0
subss .LC4(%rip), %xmm0
andps .LC5(%rip), %xmm0
movss 12(%rsp), %xmm1
call fmaxf@PLT
movss %xmm0, 12(%rsp)
addq $1, %rbx
cmpl %ebx, %ebp
jg .L28
leaq .LC6(%rip), %rsi
leaq _ZSt4cout(%rip), %rbx
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq %rbx, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq %rax, %rdi
leaq .LC7(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl %ebp, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC8(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl %r12d, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC9(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl %r13d, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC10(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
pxor %xmm0, %xmm0
cvtss2sd 12(%rsp), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $2, %ecx
movl %r12d, %eax
cltd
idivl %ecx
movl %eax, %r12d
movl %eax, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl %r13d, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L40
.L29:
movl %r12d, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl %r13d, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L41
.L30:
call cudaDeviceSynchronize@PLT
movq 24(%rsp), %r14
movl $0, %ebx
.L31:
movss (%r14,%rbx,4), %xmm0
subss .LC4(%rip), %xmm0
andps .LC5(%rip), %xmm0
movss 12(%rsp), %xmm1
call fmaxf@PLT
movss %xmm0, 12(%rsp)
addq $1, %rbx
cmpl %ebx, %ebp
jg .L31
leaq .LC11(%rip), %rsi
leaq _ZSt4cout(%rip), %rbx
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq %rbx, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq %rax, %rdi
leaq .LC7(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl %ebp, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC8(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl %r12d, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC9(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl %r13d, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC10(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
pxor %xmm0, %xmm0
cvtss2sd 12(%rsp), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movl $0, %eax
.L21:
movq 56(%rsp), %rdx
subq %fs:40, %rdx
jne .L42
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L37:
.cfi_restore_state
leaq .LC1(%rip), %rsi
leaq _ZSt4cerr(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movq (%rbx), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
leaq .LC2(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $1, %eax
jmp .L21
.L33:
leaq .LC3(%rip), %rsi
leaq _ZSt4cerr(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $1, %eax
jmp .L21
.L38:
movl %ebx, %edx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z35__device_stub__Z12createArraysPfS_iPfS_i
jmp .L26
.L39:
movl %ebp, %edx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z27__device_stub__Z5func1PfS_iPfS_i
jmp .L27
.L40:
movl %ebp, %edx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z35__device_stub__Z12createArraysPfS_iPfS_i
jmp .L29
.L41:
movl %ebp, %edx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z27__device_stub__Z5func1PfS_iPfS_i
jmp .L30
.L42:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3950:
.size main, .-main
.section .rodata.str1.1
.LC12:
.string "_Z12createArraysPfS_i"
.LC13:
.string "_Z5func1PfS_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3980:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC12(%rip), %rdx
movq %rdx, %rcx
leaq _Z12createArraysPfS_i(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC13(%rip), %rdx
movq %rdx, %rcx
leaq _Z5func1PfS_i(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3980:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC4:
.long 1073741824
.section .rodata.cst16,"aM",@progbits,16
.align 16
.LC5:
.long 2147483647
.long 0
.long 0
.long 0
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /*
*Derek Trom
*HW5 CSCI364
*/
#include <stdlib.h>
#include <iostream>
#include <math.h>
#include <iomanip>
#include <cstdio>
__device__ float add(float num){
float outnum = num + 1;
return outnum;
}
__global__
void func1(float *xd, float *yd, int n) {
int threadId = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = threadId; i < n; i+= stride) {
yd[i] = add(xd[i]);
}
}
__global__
void createArrays(float *in, float *out, int n){
int threadId = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = threadId; i < n; i+=stride) {
in[i] = 1.0f;
out[i] = 0.0f;
}
}
int main(int argc, char **argv){
using namespace std;
if( argc< 3){
cerr<<"Usage: "<<argv[0]<<" <length of arrays> <num threads/block>"<<endl;
return 1;
}
int threads_per_block = atoi(argv[2]);
int sizeOfArray = atoi(argv[1]);
if (sizeOfArray < 1 or threads_per_block < 1){
cerr<<"Array length and block size must be > 0"<<endl;
return 1;
}
float *xd, *yd;
cudaMallocManaged(&xd, sizeOfArray*sizeof(float));
cudaMallocManaged(&yd, sizeOfArray*sizeof(float));
//---------PHASE ONE----------//
int numBlocks = (sizeOfArray + threads_per_block- 1) / threads_per_block;
createArrays<<<numBlocks, threads_per_block>>>(xd, yd, sizeOfArray);
func1<<<numBlocks, threads_per_block>>>(xd,yd,sizeOfArray);
cudaDeviceSynchronize();
float maxError = 0.0f;
for (int i = 0; i < sizeOfArray; i++)
{
maxError = fmax(maxError, fabs(yd[i]-2.0f));
}
cout<<"Phase 1"<<endl;
cout<<endl<<"Array size: "<<sizeOfArray<<endl;
cout<<"Threads per block: "<<threads_per_block<<endl;
cout<<"Number of blocks: "<<numBlocks<<endl;
cout << "Max error: " << maxError << endl;
//--------Phase 2-------//
//Use half the number of blocks to get the next number but use
//the same kernel function
threads_per_block = threads_per_block/2;
createArrays<<<numBlocks, threads_per_block>>>(xd, yd, sizeOfArray);
func1<<<numBlocks, threads_per_block>>>(xd,yd,sizeOfArray);
cudaDeviceSynchronize();
for (int i = 0; i < sizeOfArray; i++)
{
maxError = fmax(maxError, fabs(yd[i]-2.0f));
}
cout<<"Phase 2"<<endl;
cout<<endl<<"Array size: "<<sizeOfArray<<endl;
cout<<"Threads per block: "<<threads_per_block<<endl;
cout<<"Number of blocks: "<<numBlocks<<endl;
cout << "Max error: " << maxError << endl;
cudaFree(xd);
cudaFree(yd);
return 0;
} | /*
*Derek Trom
*HW5 CSCI364
*/
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <iostream>
#include <math.h>
#include <iomanip>
#include <cstdio>
__device__ float add(float num){
float outnum = num + 1;
return outnum;
}
__global__
void func1(float *xd, float *yd, int n) {
int threadId = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = threadId; i < n; i+= stride) {
yd[i] = add(xd[i]);
}
}
__global__
void createArrays(float *in, float *out, int n){
int threadId = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = threadId; i < n; i+=stride) {
in[i] = 1.0f;
out[i] = 0.0f;
}
}
int main(int argc, char **argv){
using namespace std;
if( argc< 3){
cerr<<"Usage: "<<argv[0]<<" <length of arrays> <num threads/block>"<<endl;
return 1;
}
int threads_per_block = atoi(argv[2]);
int sizeOfArray = atoi(argv[1]);
if (sizeOfArray < 1 or threads_per_block < 1){
cerr<<"Array length and block size must be > 0"<<endl;
return 1;
}
float *xd, *yd;
hipMallocManaged(&xd, sizeOfArray*sizeof(float));
hipMallocManaged(&yd, sizeOfArray*sizeof(float));
//---------PHASE ONE----------//
int numBlocks = (sizeOfArray + threads_per_block- 1) / threads_per_block;
createArrays<<<numBlocks, threads_per_block>>>(xd, yd, sizeOfArray);
func1<<<numBlocks, threads_per_block>>>(xd,yd,sizeOfArray);
hipDeviceSynchronize();
float maxError = 0.0f;
for (int i = 0; i < sizeOfArray; i++)
{
maxError = fmax(maxError, fabs(yd[i]-2.0f));
}
cout<<"Phase 1"<<endl;
cout<<endl<<"Array size: "<<sizeOfArray<<endl;
cout<<"Threads per block: "<<threads_per_block<<endl;
cout<<"Number of blocks: "<<numBlocks<<endl;
cout << "Max error: " << maxError << endl;
//--------Phase 2-------//
//Use half the number of blocks to get the next number but use
//the same kernel function
threads_per_block = threads_per_block/2;
createArrays<<<numBlocks, threads_per_block>>>(xd, yd, sizeOfArray);
func1<<<numBlocks, threads_per_block>>>(xd,yd,sizeOfArray);
hipDeviceSynchronize();
for (int i = 0; i < sizeOfArray; i++)
{
maxError = fmax(maxError, fabs(yd[i]-2.0f));
}
cout<<"Phase 2"<<endl;
cout<<endl<<"Array size: "<<sizeOfArray<<endl;
cout<<"Threads per block: "<<threads_per_block<<endl;
cout<<"Number of blocks: "<<numBlocks<<endl;
cout << "Max error: " << maxError << endl;
hipFree(xd);
hipFree(yd);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /*
*Derek Trom
*HW5 CSCI364
*/
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <iostream>
#include <math.h>
#include <iomanip>
#include <cstdio>
__device__ float add(float num){
float outnum = num + 1;
return outnum;
}
__global__
void func1(float *xd, float *yd, int n) {
int threadId = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = threadId; i < n; i+= stride) {
yd[i] = add(xd[i]);
}
}
__global__
void createArrays(float *in, float *out, int n){
int threadId = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = threadId; i < n; i+=stride) {
in[i] = 1.0f;
out[i] = 0.0f;
}
}
int main(int argc, char **argv){
using namespace std;
if( argc< 3){
cerr<<"Usage: "<<argv[0]<<" <length of arrays> <num threads/block>"<<endl;
return 1;
}
int threads_per_block = atoi(argv[2]);
int sizeOfArray = atoi(argv[1]);
if (sizeOfArray < 1 or threads_per_block < 1){
cerr<<"Array length and block size must be > 0"<<endl;
return 1;
}
float *xd, *yd;
hipMallocManaged(&xd, sizeOfArray*sizeof(float));
hipMallocManaged(&yd, sizeOfArray*sizeof(float));
//---------PHASE ONE----------//
int numBlocks = (sizeOfArray + threads_per_block- 1) / threads_per_block;
createArrays<<<numBlocks, threads_per_block>>>(xd, yd, sizeOfArray);
func1<<<numBlocks, threads_per_block>>>(xd,yd,sizeOfArray);
hipDeviceSynchronize();
float maxError = 0.0f;
for (int i = 0; i < sizeOfArray; i++)
{
maxError = fmax(maxError, fabs(yd[i]-2.0f));
}
cout<<"Phase 1"<<endl;
cout<<endl<<"Array size: "<<sizeOfArray<<endl;
cout<<"Threads per block: "<<threads_per_block<<endl;
cout<<"Number of blocks: "<<numBlocks<<endl;
cout << "Max error: " << maxError << endl;
//--------Phase 2-------//
//Use half the number of blocks to get the next number but use
//the same kernel function
threads_per_block = threads_per_block/2;
createArrays<<<numBlocks, threads_per_block>>>(xd, yd, sizeOfArray);
func1<<<numBlocks, threads_per_block>>>(xd,yd,sizeOfArray);
hipDeviceSynchronize();
for (int i = 0; i < sizeOfArray; i++)
{
maxError = fmax(maxError, fabs(yd[i]-2.0f));
}
cout<<"Phase 2"<<endl;
cout<<endl<<"Array size: "<<sizeOfArray<<endl;
cout<<"Threads per block: "<<threads_per_block<<endl;
cout<<"Number of blocks: "<<numBlocks<<endl;
cout << "Max error: " << maxError << endl;
hipFree(xd);
hipFree(yd);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z5func1PfS_i
.globl _Z5func1PfS_i
.p2align 8
.type _Z5func1PfS_i,@function
_Z5func1PfS_i:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x24
s_load_b32 s10, s[0:1], 0x10
s_add_u32 s2, s0, 24
s_addc_u32 s3, s1, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s8, s4, 0xffff
s_mov_b32 s4, exec_lo
v_mad_u64_u32 v[1:2], null, s15, s8, v[0:1]
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s10, v1
s_cbranch_execz .LBB0_3
s_load_b32 s2, s[2:3], 0x0
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_mov_b32 s1, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_mul_i32 s2, s2, s8
s_ashr_i32 s3, s2, 31
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[8:9], s[2:3], 2
.p2align 6
.LBB0_2:
s_delay_alu instid0(VALU_DEP_1)
v_add_co_u32 v4, vcc_lo, s4, v2
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v3, vcc_lo
global_load_b32 v0, v[4:5], off
v_add_nc_u32_e32 v1, s2, v1
v_add_co_u32 v4, vcc_lo, s6, v2
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v3, vcc_lo
v_add_co_u32 v2, vcc_lo, v2, s8
v_add_co_ci_u32_e32 v3, vcc_lo, s9, v3, vcc_lo
s_waitcnt vmcnt(0)
v_add_f32_e32 v0, 1.0, v0
v_cmp_le_i32_e64 s0, s10, v1
global_store_b32 v[4:5], v0, off
s_or_b32 s1, s0, s1
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s1
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z5func1PfS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z5func1PfS_i, .Lfunc_end0-_Z5func1PfS_i
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z12createArraysPfS_i
.globl _Z12createArraysPfS_i
.p2align 8
.type _Z12createArraysPfS_i,@function
_Z12createArraysPfS_i:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x24
s_load_b32 s10, s[0:1], 0x10
s_add_u32 s2, s0, 24
s_addc_u32 s3, s1, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s8, s4, 0xffff
s_mov_b32 s4, exec_lo
v_mad_u64_u32 v[1:2], null, s15, s8, v[0:1]
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s10, v1
s_cbranch_execz .LBB1_3
s_load_b32 s2, s[2:3], 0x0
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
v_mov_b32_e32 v0, 1.0
v_mov_b32_e32 v4, 0
s_mov_b32 s1, 0
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_mul_i32 s2, s2, s8
s_ashr_i32 s3, s2, 31
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[8:9], s[2:3], 2
.p2align 6
.LBB1_2:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_4)
v_add_co_u32 v5, vcc_lo, s4, v2
v_add_nc_u32_e32 v1, s2, v1
v_add_co_ci_u32_e32 v6, vcc_lo, s5, v3, vcc_lo
v_add_co_u32 v7, vcc_lo, s6, v2
v_add_co_ci_u32_e32 v8, vcc_lo, s7, v3, vcc_lo
v_cmp_le_i32_e32 vcc_lo, s10, v1
v_add_co_u32 v2, s0, v2, s8
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v3, s0, s9, v3, s0
s_or_b32 s1, vcc_lo, s1
global_store_b32 v[5:6], v0, off
global_store_b32 v[7:8], v4, off
s_and_not1_b32 exec_lo, exec_lo, s1
s_cbranch_execnz .LBB1_2
.LBB1_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z12createArraysPfS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z12createArraysPfS_i, .Lfunc_end1-_Z12createArraysPfS_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z5func1PfS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z5func1PfS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z12createArraysPfS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z12createArraysPfS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /*
*Derek Trom
*HW5 CSCI364
*/
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <iostream>
#include <math.h>
#include <iomanip>
#include <cstdio>
__device__ float add(float num){
float outnum = num + 1;
return outnum;
}
__global__
void func1(float *xd, float *yd, int n) {
int threadId = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = threadId; i < n; i+= stride) {
yd[i] = add(xd[i]);
}
}
__global__
void createArrays(float *in, float *out, int n){
int threadId = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = threadId; i < n; i+=stride) {
in[i] = 1.0f;
out[i] = 0.0f;
}
}
int main(int argc, char **argv){
using namespace std;
if( argc< 3){
cerr<<"Usage: "<<argv[0]<<" <length of arrays> <num threads/block>"<<endl;
return 1;
}
int threads_per_block = atoi(argv[2]);
int sizeOfArray = atoi(argv[1]);
if (sizeOfArray < 1 or threads_per_block < 1){
cerr<<"Array length and block size must be > 0"<<endl;
return 1;
}
float *xd, *yd;
hipMallocManaged(&xd, sizeOfArray*sizeof(float));
hipMallocManaged(&yd, sizeOfArray*sizeof(float));
//---------PHASE ONE----------//
int numBlocks = (sizeOfArray + threads_per_block- 1) / threads_per_block;
createArrays<<<numBlocks, threads_per_block>>>(xd, yd, sizeOfArray);
func1<<<numBlocks, threads_per_block>>>(xd,yd,sizeOfArray);
hipDeviceSynchronize();
float maxError = 0.0f;
for (int i = 0; i < sizeOfArray; i++)
{
maxError = fmax(maxError, fabs(yd[i]-2.0f));
}
cout<<"Phase 1"<<endl;
cout<<endl<<"Array size: "<<sizeOfArray<<endl;
cout<<"Threads per block: "<<threads_per_block<<endl;
cout<<"Number of blocks: "<<numBlocks<<endl;
cout << "Max error: " << maxError << endl;
//--------Phase 2-------//
//Use half the number of blocks to get the next number but use
//the same kernel function
threads_per_block = threads_per_block/2;
createArrays<<<numBlocks, threads_per_block>>>(xd, yd, sizeOfArray);
func1<<<numBlocks, threads_per_block>>>(xd,yd,sizeOfArray);
hipDeviceSynchronize();
for (int i = 0; i < sizeOfArray; i++)
{
maxError = fmax(maxError, fabs(yd[i]-2.0f));
}
cout<<"Phase 2"<<endl;
cout<<endl<<"Array size: "<<sizeOfArray<<endl;
cout<<"Threads per block: "<<threads_per_block<<endl;
cout<<"Number of blocks: "<<numBlocks<<endl;
cout << "Max error: " << maxError << endl;
hipFree(xd);
hipFree(yd);
return 0;
} | .text
.file "hw5.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z20__device_stub__func1PfS_i # -- Begin function _Z20__device_stub__func1PfS_i
.p2align 4, 0x90
.type _Z20__device_stub__func1PfS_i,@function
_Z20__device_stub__func1PfS_i: # @_Z20__device_stub__func1PfS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z5func1PfS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z20__device_stub__func1PfS_i, .Lfunc_end0-_Z20__device_stub__func1PfS_i
.cfi_endproc
# -- End function
.globl _Z27__device_stub__createArraysPfS_i # -- Begin function _Z27__device_stub__createArraysPfS_i
.p2align 4, 0x90
.type _Z27__device_stub__createArraysPfS_i,@function
_Z27__device_stub__createArraysPfS_i: # @_Z27__device_stub__createArraysPfS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z12createArraysPfS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end1:
.size _Z27__device_stub__createArraysPfS_i, .Lfunc_end1-_Z27__device_stub__createArraysPfS_i
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI2_0:
.long 0xc0000000 # float -2
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0
.LCPI2_1:
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $152, %rsp
.cfi_def_cfa_offset 208
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %rbx
cmpl $2, %edi
jg .LBB2_3
# %bb.1:
movl $_ZSt4cerr, %edi
movl $.L.str, %esi
movl $7, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq (%rbx), %rbx
testq %rbx, %rbx
je .LBB2_15
# %bb.2:
movq %rbx, %rdi
callq strlen
movl $_ZSt4cerr, %edi
movq %rbx, %rsi
movq %rax, %rdx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
jmp .LBB2_16
.LBB2_3:
movq 16(%rbx), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r14
movq 8(%rbx), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r15
testl %r15d, %r15d
jle .LBB2_12
# %bb.4:
testl %r14d, %r14d
jle .LBB2_12
# %bb.5:
movabsq $4294967296, %r12 # imm = 0x100000000
movl %r15d, %ebx
shlq $2, %rbx
leaq 88(%rsp), %rdi
movq %rbx, %rsi
movl $1, %edx
callq hipMallocManaged
leaq 16(%rsp), %rdi
movq %rbx, %rsi
movl $1, %edx
callq hipMallocManaged
leal (%r14,%r15), %eax
decl %eax
cltd
idivl %r14d
# kill: def $eax killed $eax def $rax
movq %rax, 144(%rsp) # 8-byte Spill
leaq (%rax,%r12), %r13
movl %r14d, %ebp
orq %r12, %rbp
movq %r13, %rdi
movl $1, %esi
movq %rbp, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_7
# %bb.6:
movq 88(%rsp), %rax
movq 16(%rsp), %rcx
movq %rax, 80(%rsp)
movq %rcx, 72(%rsp)
movl %r15d, 12(%rsp)
leaq 80(%rsp), %rax
movq %rax, 96(%rsp)
leaq 72(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z12createArraysPfS_i, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_7:
movq %r13, %rdi
movl $1, %esi
movq %rbp, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_9
# %bb.8:
movq 88(%rsp), %rax
movq 16(%rsp), %rcx
movq %rax, 80(%rsp)
movq %rcx, 72(%rsp)
movl %r15d, 12(%rsp)
leaq 80(%rsp), %rax
movq %rax, 96(%rsp)
leaq 72(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z5func1PfS_i, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_9:
callq hipDeviceSynchronize
testl %r15d, %r15d
jle .LBB2_21
# %bb.10: # %.lr.ph
movq 16(%rsp), %rax
movl %r15d, %ecx
xorps %xmm2, %xmm2
xorl %edx, %edx
movss .LCPI2_0(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
movaps .LCPI2_1(%rip), %xmm1 # xmm1 = [NaN,NaN,NaN,NaN]
movaps %xmm2, %xmm5
.p2align 4, 0x90
.LBB2_11: # =>This Inner Loop Header: Depth=1
movss (%rax,%rdx,4), %xmm3 # xmm3 = mem[0],zero,zero,zero
addss %xmm0, %xmm3
andps %xmm1, %xmm3
cmpunordss %xmm5, %xmm5
movaps %xmm5, %xmm4
andps %xmm3, %xmm4
maxss %xmm2, %xmm3
andnps %xmm3, %xmm5
orps %xmm4, %xmm5
incq %rdx
movaps %xmm5, %xmm2
cmpq %rdx, %rcx
jne .LBB2_11
jmp .LBB2_22
.LBB2_12:
movl $_ZSt4cerr, %edi
movl $.L.str.2, %esi
movl $39, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cerr(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cerr+240(%rax), %rbx
testq %rbx, %rbx
je .LBB2_80
# %bb.13: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i85
cmpb $0, 56(%rbx)
jne .LBB2_18
.LBB2_19:
movq %rbx, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
jmp .LBB2_20
.LBB2_15:
movq _ZSt4cerr(%rip), %rax
movq -24(%rax), %rax
leaq _ZSt4cerr(%rax), %rdi
movl _ZSt4cerr+32(%rax), %esi
orl $1, %esi
callq _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate
.LBB2_16: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit
movl $_ZSt4cerr, %edi
movl $.L.str.1, %esi
movl $39, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cerr(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cerr+240(%rax), %rbx
testq %rbx, %rbx
je .LBB2_80
# %bb.17: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%rbx)
je .LBB2_19
.LBB2_18:
movzbl 67(%rbx), %eax
.LBB2_20: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %al, %esi
movl $_ZSt4cerr, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $1, %eax
jmp .LBB2_79
.LBB2_21:
xorps %xmm5, %xmm5
.LBB2_22: # %._crit_edge
movaps %xmm5, 128(%rsp) # 16-byte Spill
movl $_ZSt4cout, %edi
movl $.L.str.3, %esi
movl $7, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %rbp
testq %rbp, %rbp
je .LBB2_80
# %bb.23: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i90
cmpb $0, 56(%rbp)
je .LBB2_25
# %bb.24:
movzbl 67(%rbp), %eax
jmp .LBB2_26
.LBB2_25:
movq %rbp, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbp), %rax
movq %rbp, %rdi
movl $10, %esi
callq *48(%rax)
.LBB2_26: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit93
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %rbp
testq %rbp, %rbp
je .LBB2_80
# %bb.27: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i95
cmpb $0, 56(%rbp)
je .LBB2_29
# %bb.28:
movzbl 67(%rbp), %eax
jmp .LBB2_30
.LBB2_29:
movq %rbp, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbp), %rax
movq %rbp, %rdi
movl $10, %esi
callq *48(%rax)
.LBB2_30: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit98
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq %rax, %rbp
movl $.L.str.4, %esi
movl $12, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq %rbp, %rdi
movl %r15d, %esi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbp
testq %rbp, %rbp
je .LBB2_80
# %bb.31: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i100
cmpb $0, 56(%rbp)
je .LBB2_33
# %bb.32:
movzbl 67(%rbp), %ecx
jmp .LBB2_34
.LBB2_33:
movq %rbp, %rdi
movq %rax, %rbx
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbp), %rax
movq %rbp, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %rbx, %rax
.LBB2_34: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit103
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $_ZSt4cout, %edi
movl $.L.str.5, %esi
movl $19, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl %r14d, %esi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbp
testq %rbp, %rbp
je .LBB2_80
# %bb.35: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i105
cmpb $0, 56(%rbp)
je .LBB2_37
# %bb.36:
movzbl 67(%rbp), %ecx
jmp .LBB2_38
.LBB2_37:
movq %rbp, %rdi
movq %rax, %rbx
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbp), %rax
movq %rbp, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %rbx, %rax
.LBB2_38: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit108
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $_ZSt4cout, %edi
movl $.L.str.6, %esi
movl $18, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movq 144(%rsp), %rsi # 8-byte Reload
# kill: def $esi killed $esi killed $rsi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbp
testq %rbp, %rbp
je .LBB2_80
# %bb.39: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i110
cmpb $0, 56(%rbp)
je .LBB2_41
# %bb.40:
movzbl 67(%rbp), %ecx
jmp .LBB2_42
.LBB2_41:
movq %rbp, %rdi
movq %rax, %rbx
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbp), %rax
movq %rbp, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %rbx, %rax
.LBB2_42: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit113
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $_ZSt4cout, %edi
movl $.L.str.7, %esi
movl $11, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movaps 128(%rsp), %xmm0 # 16-byte Reload
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbp
testq %rbp, %rbp
je .LBB2_80
# %bb.43: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i115
cmpb $0, 56(%rbp)
je .LBB2_45
# %bb.44:
movzbl 67(%rbp), %ecx
jmp .LBB2_46
.LBB2_45:
movq %rbp, %rdi
movq %rax, %rbx
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbp), %rax
movq %rbp, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %rbx, %rax
.LBB2_46: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit118
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
shrl %r14d
orq %r14, %r12
movq %r13, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_48
# %bb.47:
movq 88(%rsp), %rax
movq 16(%rsp), %rcx
movq %rax, 80(%rsp)
movq %rcx, 72(%rsp)
movl %r15d, 12(%rsp)
leaq 80(%rsp), %rax
movq %rax, 96(%rsp)
leaq 72(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z12createArraysPfS_i, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_48:
movq %r13, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_50
# %bb.49:
movq 88(%rsp), %rax
movq 16(%rsp), %rcx
movq %rax, 80(%rsp)
movq %rcx, 72(%rsp)
movl %r15d, 12(%rsp)
leaq 80(%rsp), %rax
movq %rax, 96(%rsp)
leaq 72(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z5func1PfS_i, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_50:
callq hipDeviceSynchronize
testl %r15d, %r15d
jle .LBB2_53
# %bb.51: # %.lr.ph175
movq 16(%rsp), %rax
movl %r15d, %ecx
xorl %edx, %edx
movss .LCPI2_0(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
movaps .LCPI2_1(%rip), %xmm1 # xmm1 = [NaN,NaN,NaN,NaN]
movaps 128(%rsp), %xmm4 # 16-byte Reload
movaps %xmm4, %xmm5
.p2align 4, 0x90
.LBB2_52: # =>This Inner Loop Header: Depth=1
movss (%rax,%rdx,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
addss %xmm0, %xmm2
andps %xmm1, %xmm2
cmpunordss %xmm5, %xmm5
movaps %xmm5, %xmm3
andps %xmm2, %xmm3
maxss %xmm4, %xmm2
andnps %xmm2, %xmm5
orps %xmm3, %xmm5
incq %rdx
movaps %xmm5, %xmm4
cmpq %rdx, %rcx
jne .LBB2_52
jmp .LBB2_54
.LBB2_53:
movaps 128(%rsp), %xmm5 # 16-byte Reload
.LBB2_54: # %._crit_edge176
movaps %xmm5, 128(%rsp) # 16-byte Spill
movl $_ZSt4cout, %edi
movl $.L.str.8, %esi
movl $7, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r12
testq %r12, %r12
je .LBB2_80
# %bb.55: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i120
cmpb $0, 56(%r12)
je .LBB2_57
# %bb.56:
movzbl 67(%r12), %eax
jmp .LBB2_58
.LBB2_57:
movq %r12, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r12), %rax
movq %r12, %rdi
movl $10, %esi
callq *48(%rax)
.LBB2_58: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit123
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r12
testq %r12, %r12
je .LBB2_80
# %bb.59: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i125
cmpb $0, 56(%r12)
je .LBB2_61
# %bb.60:
movzbl 67(%r12), %eax
jmp .LBB2_62
.LBB2_61:
movq %r12, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r12), %rax
movq %r12, %rdi
movl $10, %esi
callq *48(%rax)
.LBB2_62: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit128
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq %rax, %r12
movl $.L.str.4, %esi
movl $12, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq %r12, %rdi
movl %r15d, %esi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r15
testq %r15, %r15
je .LBB2_80
# %bb.63: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i130
cmpb $0, 56(%r15)
je .LBB2_65
# %bb.64:
movzbl 67(%r15), %ecx
jmp .LBB2_66
.LBB2_65:
movq %r15, %rdi
movq %rax, %rbx
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r15), %rax
movq %r15, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %rbx, %rax
.LBB2_66: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit133
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $_ZSt4cout, %edi
movl $.L.str.5, %esi
movl $19, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl %r14d, %esi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r14
testq %r14, %r14
je .LBB2_80
# %bb.67: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i135
cmpb $0, 56(%r14)
je .LBB2_69
# %bb.68:
movzbl 67(%r14), %ecx
jmp .LBB2_70
.LBB2_69:
movq %r14, %rdi
movq %rax, %rbx
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %rbx, %rax
.LBB2_70: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit138
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $_ZSt4cout, %edi
movl $.L.str.6, %esi
movl $18, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movq 144(%rsp), %rsi # 8-byte Reload
# kill: def $esi killed $esi killed $rsi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB2_80
# %bb.71: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i140
cmpb $0, 56(%rbx)
je .LBB2_73
# %bb.72:
movzbl 67(%rbx), %ecx
jmp .LBB2_74
.LBB2_73:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB2_74: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit143
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $_ZSt4cout, %edi
movl $.L.str.7, %esi
movl $11, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movaps 128(%rsp), %xmm0 # 16-byte Reload
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB2_80
# %bb.75: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i145
cmpb $0, 56(%rbx)
je .LBB2_77
# %bb.76:
movzbl 67(%rbx), %ecx
jmp .LBB2_78
.LBB2_77:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB2_78: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit148
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq 88(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
xorl %eax, %eax
.LBB2_79:
addq $152, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB2_80:
.cfi_def_cfa_offset 208
callq _ZSt16__throw_bad_castv
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z5func1PfS_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12createArraysPfS_i, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z5func1PfS_i,@object # @_Z5func1PfS_i
.section .rodata,"a",@progbits
.globl _Z5func1PfS_i
.p2align 3, 0x0
_Z5func1PfS_i:
.quad _Z20__device_stub__func1PfS_i
.size _Z5func1PfS_i, 8
.type _Z12createArraysPfS_i,@object # @_Z12createArraysPfS_i
.globl _Z12createArraysPfS_i
.p2align 3, 0x0
_Z12createArraysPfS_i:
.quad _Z27__device_stub__createArraysPfS_i
.size _Z12createArraysPfS_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Usage: "
.size .L.str, 8
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz " <length of arrays> <num threads/block>"
.size .L.str.1, 40
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Array length and block size must be > 0"
.size .L.str.2, 40
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Phase 1"
.size .L.str.3, 8
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Array size: "
.size .L.str.4, 13
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "Threads per block: "
.size .L.str.5, 20
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "Number of blocks: "
.size .L.str.6, 19
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "Max error: "
.size .L.str.7, 12
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "Phase 2"
.size .L.str.8, 8
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z5func1PfS_i"
.size .L__unnamed_1, 14
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z12createArraysPfS_i"
.size .L__unnamed_2, 22
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z20__device_stub__func1PfS_i
.addrsig_sym _Z27__device_stub__createArraysPfS_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z5func1PfS_i
.addrsig_sym _Z12createArraysPfS_i
.addrsig_sym _ZSt4cerr
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z12createArraysPfS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e280000002500 */
/*0020*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R3, R3, c[0x0][0x0], R0 ; /* 0x0000000003037a24 */
/* 0x001fca00078e0200 */
/*0040*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x170], PT ; /* 0x00005c0003007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ MOV R0, c[0x0][0x0] ; /* 0x0000000000007a02 */
/* 0x000fe20000000f00 */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0080*/ BSSY B0, 0x310 ; /* 0x0000028000007945 */
/* 0x000fe60003800000 */
/*0090*/ IMAD R0, R0, c[0x0][0xc], RZ ; /* 0x0000030000007a24 */
/* 0x000fc800078e02ff */
/*00a0*/ I2F.U32.RP R6, R0 ; /* 0x0000000000067306 */
/* 0x000e220000209000 */
/*00b0*/ IMAD.MOV R9, RZ, RZ, -R0 ; /* 0x000000ffff097224 */
/* 0x000fe200078e0a00 */
/*00c0*/ IADD3 R2, R0.reuse, R3, RZ ; /* 0x0000000300027210 */
/* 0x040fe40007ffe0ff */
/*00d0*/ ISETP.NE.U32.AND P2, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fe40003f45070 */
/*00e0*/ LOP3.LUT R7, RZ, R2, RZ, 0x33, !PT ; /* 0x00000002ff077212 */
/* 0x000fc800078e33ff */
/*00f0*/ IADD3 R7, R7, c[0x0][0x170], R0 ; /* 0x00005c0007077a10 */
/* 0x000fe20007ffe000 */
/*0100*/ MUFU.RCP R6, R6 ; /* 0x0000000600067308 */
/* 0x001e240000001000 */
/*0110*/ IADD3 R4, R6, 0xffffffe, RZ ; /* 0x0ffffffe06047810 */
/* 0x001fcc0007ffe0ff */
/*0120*/ F2I.FTZ.U32.TRUNC.NTZ R5, R4 ; /* 0x0000000400057305 */
/* 0x000064000021f000 */
/*0130*/ IMAD.MOV.U32 R4, RZ, RZ, RZ ; /* 0x000000ffff047224 */
/* 0x001fe400078e00ff */
/*0140*/ IMAD R9, R9, R5, RZ ; /* 0x0000000509097224 */
/* 0x002fc800078e02ff */
/*0150*/ IMAD.HI.U32 R2, R5, R9, R4 ; /* 0x0000000905027227 */
/* 0x000fcc00078e0004 */
/*0160*/ IMAD.HI.U32 R2, R2, R7, RZ ; /* 0x0000000702027227 */
/* 0x000fca00078e00ff */
/*0170*/ IADD3 R4, -R2, RZ, RZ ; /* 0x000000ff02047210 */
/* 0x000fca0007ffe1ff */
/*0180*/ IMAD R7, R0, R4, R7 ; /* 0x0000000400077224 */
/* 0x000fca00078e0207 */
/*0190*/ ISETP.GE.U32.AND P0, PT, R7, R0, PT ; /* 0x000000000700720c */
/* 0x000fda0003f06070 */
/*01a0*/ @P0 IMAD.IADD R7, R7, 0x1, -R0 ; /* 0x0000000107070824 */
/* 0x000fe200078e0a00 */
/*01b0*/ @P0 IADD3 R2, R2, 0x1, RZ ; /* 0x0000000102020810 */
/* 0x000fc80007ffe0ff */
/*01c0*/ ISETP.GE.U32.AND P1, PT, R7, R0, PT ; /* 0x000000000700720c */
/* 0x000fda0003f26070 */
/*01d0*/ @P1 IADD3 R2, R2, 0x1, RZ ; /* 0x0000000102021810 */
/* 0x000fe40007ffe0ff */
/*01e0*/ @!P2 LOP3.LUT R2, RZ, R0, RZ, 0x33, !PT ; /* 0x00000000ff02a212 */
/* 0x000fc800078e33ff */
/*01f0*/ IADD3 R4, R2.reuse, 0x1, RZ ; /* 0x0000000102047810 */
/* 0x040fe40007ffe0ff */
/*0200*/ ISETP.GE.U32.AND P1, PT, R2, 0x3, PT ; /* 0x000000030200780c */
/* 0x000fe40003f26070 */
/*0210*/ LOP3.LUT P0, R4, R4, 0x3, RZ, 0xc0, !PT ; /* 0x0000000304047812 */
/* 0x000fda000780c0ff */
/*0220*/ @!P0 BRA 0x300 ; /* 0x000000d000008947 */
/* 0x000fea0003800000 */
/*0230*/ HFMA2.MMA R6, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff067435 */
/* 0x000fe200000001ff */
/*0240*/ IMAD.MOV.U32 R2, RZ, RZ, R4 ; /* 0x000000ffff027224 */
/* 0x000fe200078e0004 */
/*0250*/ MOV R9, 0x3f800000 ; /* 0x3f80000000097802 */
/* 0x000fd00000000f00 */
/*0260*/ IMAD.WIDE R4, R3, R6, c[0x0][0x168] ; /* 0x00005a0003047625 */
/* 0x000fc800078e0206 */
/*0270*/ IMAD.WIDE R6, R3, R6, c[0x0][0x160] ; /* 0x0000580003067625 */
/* 0x000fc800078e0206 */
/*0280*/ IADD3 R2, R2, -0x1, RZ ; /* 0xffffffff02027810 */
/* 0x000fe20007ffe0ff */
/*0290*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x0001e2000c101904 */
/*02a0*/ IMAD.IADD R3, R0, 0x1, R3 ; /* 0x0000000100037824 */
/* 0x000fe400078e0203 */
/*02b0*/ ISETP.NE.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x000fe20003f05270 */
/*02c0*/ STG.E [R4.64], RZ ; /* 0x000000ff04007986 */
/* 0x0003e2000c101904 */
/*02d0*/ IMAD.WIDE R6, R0, 0x4, R6 ; /* 0x0000000400067825 */
/* 0x001fc800078e0206 */
/*02e0*/ IMAD.WIDE R4, R0, 0x4, R4 ; /* 0x0000000400047825 */
/* 0x002fce00078e0204 */
/*02f0*/ @P0 BRA 0x280 ; /* 0xffffff8000000947 */
/* 0x000fea000383ffff */
/*0300*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0310*/ @!P1 EXIT ; /* 0x000000000000994d */
/* 0x000fea0003800000 */
/*0320*/ HFMA2.MMA R2, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff027435 */
/* 0x000fe200000001ff */
/*0330*/ IMAD.MOV.U32 R21, RZ, RZ, 0x3f800000 ; /* 0x3f800000ff157424 */
/* 0x000fd200078e00ff */
/*0340*/ IMAD.WIDE R4, R3, R2, c[0x0][0x160] ; /* 0x0000580003047625 */
/* 0x001fc800078e0202 */
/*0350*/ IMAD.WIDE R6, R3, R2, c[0x0][0x168] ; /* 0x00005a0003067625 */
/* 0x000fe200078e0202 */
/*0360*/ STG.E [R4.64], R21 ; /* 0x0000001504007986 */
/* 0x0001e2000c101904 */
/*0370*/ IADD3 R3, R0.reuse, R3, R0 ; /* 0x0000000300037210 */
/* 0x040fe40007ffe000 */
/*0380*/ IMAD.WIDE R8, R0.reuse, 0x4, R4 ; /* 0x0000000400087825 */
/* 0x040fe200078e0204 */
/*0390*/ STG.E [R6.64], RZ ; /* 0x000000ff06007986 */
/* 0x0001e2000c101904 */
/*03a0*/ IADD3 R3, R0.reuse, R3, R0 ; /* 0x0000000300037210 */
/* 0x040fe40007ffe000 */
/*03b0*/ IMAD.WIDE R10, R0, 0x4, R6 ; /* 0x00000004000a7825 */
/* 0x000fe200078e0206 */
/*03c0*/ STG.E [R8.64], R21 ; /* 0x0000001508007986 */
/* 0x0001e2000c101904 */
/*03d0*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x170], PT ; /* 0x00005c0003007a0c */
/* 0x000fc40003f06270 */
/*03e0*/ IMAD.WIDE R12, R0.reuse, 0x4, R8 ; /* 0x00000004000c7825 */
/* 0x040fe200078e0208 */
/*03f0*/ STG.E [R10.64], RZ ; /* 0x000000ff0a007986 */
/* 0x0001e6000c101904 */
/*0400*/ IMAD.WIDE R14, R0.reuse, 0x4, R10 ; /* 0x00000004000e7825 */
/* 0x040fe200078e020a */
/*0410*/ STG.E [R12.64], R21 ; /* 0x000000150c007986 */
/* 0x0001e6000c101904 */
/*0420*/ IMAD.WIDE R16, R0.reuse, 0x4, R12 ; /* 0x0000000400107825 */
/* 0x040fe200078e020c */
/*0430*/ STG.E [R14.64], RZ ; /* 0x000000ff0e007986 */
/* 0x0001e6000c101904 */
/*0440*/ IMAD.WIDE R18, R0, 0x4, R14 ; /* 0x0000000400127825 */
/* 0x000fe200078e020e */
/*0450*/ STG.E [R16.64], R21 ; /* 0x0000001510007986 */
/* 0x0001e8000c101904 */
/*0460*/ STG.E [R18.64], RZ ; /* 0x000000ff12007986 */
/* 0x0001e2000c101904 */
/*0470*/ @!P0 BRA 0x340 ; /* 0xfffffec000008947 */
/* 0x000fea000383ffff */
/*0480*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0490*/ BRA 0x490; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*04a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0500*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0510*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0520*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0530*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0540*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0550*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0560*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0570*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z5func1PfS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e280000002500 */
/*0020*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R3, R3, c[0x0][0x0], R0 ; /* 0x0000000003037a24 */
/* 0x001fca00078e0200 */
/*0040*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x170], PT ; /* 0x00005c0003007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ MOV R0, c[0x0][0x0] ; /* 0x0000000000007a02 */
/* 0x000fe20000000f00 */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0080*/ BSSY B0, 0x310 ; /* 0x0000028000007945 */
/* 0x000fe60003800000 */
/*0090*/ IMAD R0, R0, c[0x0][0xc], RZ ; /* 0x0000030000007a24 */
/* 0x000fc800078e02ff */
/*00a0*/ I2F.U32.RP R6, R0 ; /* 0x0000000000067306 */
/* 0x000e220000209000 */
/*00b0*/ IMAD.MOV R9, RZ, RZ, -R0 ; /* 0x000000ffff097224 */
/* 0x000fe200078e0a00 */
/*00c0*/ IADD3 R2, R0.reuse, R3, RZ ; /* 0x0000000300027210 */
/* 0x040fe40007ffe0ff */
/*00d0*/ ISETP.NE.U32.AND P2, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fe40003f45070 */
/*00e0*/ LOP3.LUT R7, RZ, R2, RZ, 0x33, !PT ; /* 0x00000002ff077212 */
/* 0x000fc800078e33ff */
/*00f0*/ IADD3 R7, R7, c[0x0][0x170], R0 ; /* 0x00005c0007077a10 */
/* 0x000fe20007ffe000 */
/*0100*/ MUFU.RCP R6, R6 ; /* 0x0000000600067308 */
/* 0x001e240000001000 */
/*0110*/ IADD3 R4, R6, 0xffffffe, RZ ; /* 0x0ffffffe06047810 */
/* 0x001fcc0007ffe0ff */
/*0120*/ F2I.FTZ.U32.TRUNC.NTZ R5, R4 ; /* 0x0000000400057305 */
/* 0x000064000021f000 */
/*0130*/ HFMA2.MMA R4, -RZ, RZ, 0, 0 ; /* 0x00000000ff047435 */
/* 0x001fe200000001ff */
/*0140*/ IMAD R9, R9, R5, RZ ; /* 0x0000000509097224 */
/* 0x002fd200078e02ff */
/*0150*/ IMAD.HI.U32 R2, R5, R9, R4 ; /* 0x0000000905027227 */
/* 0x000fcc00078e0004 */
/*0160*/ IMAD.HI.U32 R2, R2, R7, RZ ; /* 0x0000000702027227 */
/* 0x000fc800078e00ff */
/*0170*/ IMAD.MOV R4, RZ, RZ, -R2 ; /* 0x000000ffff047224 */
/* 0x000fc800078e0a02 */
/*0180*/ IMAD R7, R0, R4, R7 ; /* 0x0000000400077224 */
/* 0x000fca00078e0207 */
/*0190*/ ISETP.GE.U32.AND P0, PT, R7, R0, PT ; /* 0x000000000700720c */
/* 0x000fda0003f06070 */
/*01a0*/ @P0 IADD3 R7, -R0, R7, RZ ; /* 0x0000000700070210 */
/* 0x000fe40007ffe1ff */
/*01b0*/ @P0 IADD3 R2, R2, 0x1, RZ ; /* 0x0000000102020810 */
/* 0x000fe40007ffe0ff */
/*01c0*/ ISETP.GE.U32.AND P1, PT, R7, R0, PT ; /* 0x000000000700720c */
/* 0x000fda0003f26070 */
/*01d0*/ @P1 IADD3 R2, R2, 0x1, RZ ; /* 0x0000000102021810 */
/* 0x000fe40007ffe0ff */
/*01e0*/ @!P2 LOP3.LUT R2, RZ, R0, RZ, 0x33, !PT ; /* 0x00000000ff02a212 */
/* 0x000fc800078e33ff */
/*01f0*/ IADD3 R4, R2.reuse, 0x1, RZ ; /* 0x0000000102047810 */
/* 0x040fe40007ffe0ff */
/*0200*/ ISETP.GE.U32.AND P1, PT, R2, 0x3, PT ; /* 0x000000030200780c */
/* 0x000fe40003f26070 */
/*0210*/ LOP3.LUT P0, R4, R4, 0x3, RZ, 0xc0, !PT ; /* 0x0000000304047812 */
/* 0x000fda000780c0ff */
/*0220*/ @!P0 BRA 0x300 ; /* 0x000000d000008947 */
/* 0x000fea0003800000 */
/*0230*/ MOV R6, 0x4 ; /* 0x0000000400067802 */
/* 0x000fe20000000f00 */
/*0240*/ IMAD.MOV.U32 R2, RZ, RZ, R4 ; /* 0x000000ffff027224 */
/* 0x000fc800078e0004 */
/*0250*/ IMAD.WIDE R4, R3, R6, c[0x0][0x168] ; /* 0x00005a0003047625 */
/* 0x000fc800078e0206 */
/*0260*/ IMAD.WIDE R6, R3, R6, c[0x0][0x160] ; /* 0x0000580003067625 */
/* 0x000fca00078e0206 */
/*0270*/ LDG.E R8, [R6.64] ; /* 0x0000000406087981 */
/* 0x0000a2000c1e1900 */
/*0280*/ IADD3 R2, R2, -0x1, RZ ; /* 0xffffffff02027810 */
/* 0x000fe40007ffe0ff */
/*0290*/ IADD3 R3, R0, R3, RZ ; /* 0x0000000300037210 */
/* 0x000fe40007ffe0ff */
/*02a0*/ ISETP.NE.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x000fe20003f05270 */
/*02b0*/ IMAD.WIDE R6, R0, 0x4, R6 ; /* 0x0000000400067825 */
/* 0x001fc800078e0206 */
/*02c0*/ FADD R9, R8, 1 ; /* 0x3f80000008097421 */
/* 0x004fca0000000000 */
/*02d0*/ STG.E [R4.64], R9 ; /* 0x0000000904007986 */
/* 0x0001e4000c101904 */
/*02e0*/ IMAD.WIDE R4, R0, 0x4, R4 ; /* 0x0000000400047825 */
/* 0x001fe200078e0204 */
/*02f0*/ @P0 BRA 0x270 ; /* 0xffffff7000000947 */
/* 0x000fea000383ffff */
/*0300*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0310*/ @!P1 EXIT ; /* 0x000000000000994d */
/* 0x000fea0003800000 */
/*0320*/ HFMA2.MMA R6, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff067435 */
/* 0x002fd400000001ff */
/*0330*/ IMAD.WIDE R4, R3, R6, c[0x0][0x160] ; /* 0x0000580003047625 */
/* 0x000fca00078e0206 */
/*0340*/ LDG.E R2, [R4.64] ; /* 0x0000000404027981 */
/* 0x000ea2000c1e1900 */
/*0350*/ IMAD.WIDE R6, R3, R6, c[0x0][0x168] ; /* 0x00005a0003067625 */
/* 0x000fc800078e0206 */
/*0360*/ IMAD.WIDE R8, R0, 0x4, R4 ; /* 0x0000000400087825 */
/* 0x000fc800078e0204 */
/*0370*/ FADD R17, R2, 1 ; /* 0x3f80000002117421 */
/* 0x004fca0000000000 */
/*0380*/ STG.E [R6.64], R17 ; /* 0x0000001106007986 */
/* 0x0001e8000c101904 */
/*0390*/ LDG.E R2, [R8.64] ; /* 0x0000000408027981 */
/* 0x000ea2000c1e1900 */
/*03a0*/ IMAD.WIDE R10, R0, 0x4, R6 ; /* 0x00000004000a7825 */
/* 0x000fc800078e0206 */
/*03b0*/ IMAD.WIDE R12, R0, 0x4, R8 ; /* 0x00000004000c7825 */
/* 0x000fc800078e0208 */
/*03c0*/ FADD R19, R2, 1 ; /* 0x3f80000002137421 */
/* 0x004fca0000000000 */
/*03d0*/ STG.E [R10.64], R19 ; /* 0x000000130a007986 */
/* 0x0003e8000c101904 */
/*03e0*/ LDG.E R2, [R12.64] ; /* 0x000000040c027981 */
/* 0x000ea2000c1e1900 */
/*03f0*/ IMAD.WIDE R4, R0, 0x4, R10 ; /* 0x0000000400047825 */
/* 0x000fc800078e020a */
/*0400*/ IMAD.WIDE R14, R0, 0x4, R12 ; /* 0x00000004000e7825 */
/* 0x000fc800078e020c */
/*0410*/ FADD R21, R2, 1 ; /* 0x3f80000002157421 */
/* 0x004fca0000000000 */
/*0420*/ STG.E [R4.64], R21 ; /* 0x0000001504007986 */
/* 0x0003e8000c101904 */
/*0430*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000ea2000c1e1900 */
/*0440*/ IMAD.WIDE R6, R0.reuse, 0x4, R4 ; /* 0x0000000400067825 */
/* 0x041fe200078e0204 */
/*0450*/ IADD3 R3, R0, R3, R0 ; /* 0x0000000300037210 */
/* 0x000fc80007ffe000 */
/*0460*/ IADD3 R3, R0, R3, R0 ; /* 0x0000000300037210 */
/* 0x000fc80007ffe000 */
/*0470*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x170], PT ; /* 0x00005c0003007a0c */
/* 0x000fe20003f06270 */
/*0480*/ FADD R9, R14, 1 ; /* 0x3f8000000e097421 */
/* 0x004fca0000000000 */
/*0490*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x0003ee000c101904 */
/*04a0*/ @!P0 BRA 0x320 ; /* 0xfffffe7000008947 */
/* 0x000fea000383ffff */
/*04b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*04c0*/ BRA 0x4c0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*04d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0500*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0510*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0520*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0530*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0540*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0550*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0560*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0570*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z5func1PfS_i
.globl _Z5func1PfS_i
.p2align 8
.type _Z5func1PfS_i,@function
_Z5func1PfS_i:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x24
s_load_b32 s10, s[0:1], 0x10
s_add_u32 s2, s0, 24
s_addc_u32 s3, s1, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s8, s4, 0xffff
s_mov_b32 s4, exec_lo
v_mad_u64_u32 v[1:2], null, s15, s8, v[0:1]
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s10, v1
s_cbranch_execz .LBB0_3
s_load_b32 s2, s[2:3], 0x0
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_mov_b32 s1, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_mul_i32 s2, s2, s8
s_ashr_i32 s3, s2, 31
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[8:9], s[2:3], 2
.p2align 6
.LBB0_2:
s_delay_alu instid0(VALU_DEP_1)
v_add_co_u32 v4, vcc_lo, s4, v2
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v3, vcc_lo
global_load_b32 v0, v[4:5], off
v_add_nc_u32_e32 v1, s2, v1
v_add_co_u32 v4, vcc_lo, s6, v2
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v3, vcc_lo
v_add_co_u32 v2, vcc_lo, v2, s8
v_add_co_ci_u32_e32 v3, vcc_lo, s9, v3, vcc_lo
s_waitcnt vmcnt(0)
v_add_f32_e32 v0, 1.0, v0
v_cmp_le_i32_e64 s0, s10, v1
global_store_b32 v[4:5], v0, off
s_or_b32 s1, s0, s1
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s1
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z5func1PfS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z5func1PfS_i, .Lfunc_end0-_Z5func1PfS_i
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z12createArraysPfS_i
.globl _Z12createArraysPfS_i
.p2align 8
.type _Z12createArraysPfS_i,@function
_Z12createArraysPfS_i:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x24
s_load_b32 s10, s[0:1], 0x10
s_add_u32 s2, s0, 24
s_addc_u32 s3, s1, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s8, s4, 0xffff
s_mov_b32 s4, exec_lo
v_mad_u64_u32 v[1:2], null, s15, s8, v[0:1]
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s10, v1
s_cbranch_execz .LBB1_3
s_load_b32 s2, s[2:3], 0x0
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
v_mov_b32_e32 v0, 1.0
v_mov_b32_e32 v4, 0
s_mov_b32 s1, 0
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_mul_i32 s2, s2, s8
s_ashr_i32 s3, s2, 31
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[8:9], s[2:3], 2
.p2align 6
.LBB1_2:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_4)
v_add_co_u32 v5, vcc_lo, s4, v2
v_add_nc_u32_e32 v1, s2, v1
v_add_co_ci_u32_e32 v6, vcc_lo, s5, v3, vcc_lo
v_add_co_u32 v7, vcc_lo, s6, v2
v_add_co_ci_u32_e32 v8, vcc_lo, s7, v3, vcc_lo
v_cmp_le_i32_e32 vcc_lo, s10, v1
v_add_co_u32 v2, s0, v2, s8
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v3, s0, s9, v3, s0
s_or_b32 s1, vcc_lo, s1
global_store_b32 v[5:6], v0, off
global_store_b32 v[7:8], v4, off
s_and_not1_b32 exec_lo, exec_lo, s1
s_cbranch_execnz .LBB1_2
.LBB1_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z12createArraysPfS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z12createArraysPfS_i, .Lfunc_end1-_Z12createArraysPfS_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z5func1PfS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z5func1PfS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z12createArraysPfS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z12createArraysPfS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0004c55b_00000000-6_hw5.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3953:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3953:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z3addf
.type _Z3addf, @function
_Z3addf:
.LFB3949:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE3949:
.size _Z3addf, .-_Z3addf
.globl _Z27__device_stub__Z5func1PfS_iPfS_i
.type _Z27__device_stub__Z5func1PfS_iPfS_i, @function
_Z27__device_stub__Z5func1PfS_iPfS_i:
.LFB3975:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L9
.L5:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L10
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L9:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z5func1PfS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L5
.L10:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3975:
.size _Z27__device_stub__Z5func1PfS_iPfS_i, .-_Z27__device_stub__Z5func1PfS_iPfS_i
.globl _Z5func1PfS_i
.type _Z5func1PfS_i, @function
_Z5func1PfS_i:
.LFB3976:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z5func1PfS_iPfS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3976:
.size _Z5func1PfS_i, .-_Z5func1PfS_i
.globl _Z35__device_stub__Z12createArraysPfS_iPfS_i
.type _Z35__device_stub__Z12createArraysPfS_iPfS_i, @function
_Z35__device_stub__Z12createArraysPfS_iPfS_i:
.LFB3977:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L17
.L13:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L18
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L17:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z12createArraysPfS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L13
.L18:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3977:
.size _Z35__device_stub__Z12createArraysPfS_iPfS_i, .-_Z35__device_stub__Z12createArraysPfS_iPfS_i
.globl _Z12createArraysPfS_i
.type _Z12createArraysPfS_i, @function
_Z12createArraysPfS_i:
.LFB3978:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z12createArraysPfS_iPfS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3978:
.size _Z12createArraysPfS_i, .-_Z12createArraysPfS_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "Usage: "
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string " <length of arrays> <num threads/block>"
.align 8
.LC3:
.string "Array length and block size must be > 0"
.section .rodata.str1.1
.LC6:
.string "Phase 1"
.LC7:
.string "Array size: "
.LC8:
.string "Threads per block: "
.LC9:
.string "Number of blocks: "
.LC10:
.string "Max error: "
.LC11:
.string "Phase 2"
.text
.globl main
.type main, @function
main:
.LFB3950:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $64, %rsp
.cfi_def_cfa_offset 112
movq %rsi, %rbx
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
cmpl $2, %edi
jle .L37
movq 16(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %r14
movl %eax, %r12d
movq 8(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %rbx
movl %eax, %ebp
testl %eax, %eax
jle .L33
testl %r14d, %r14d
jle .L33
movslq %eax, %r13
salq $2, %r13
leaq 16(%rsp), %rdi
movl $1, %edx
movq %r13, %rsi
call cudaMallocManaged@PLT
leaq 24(%rsp), %rdi
movl $1, %edx
movq %r13, %rsi
call cudaMallocManaged@PLT
leal -1(%r14,%rbx), %eax
cltd
idivl %r14d
movl %eax, %r13d
movl %r14d, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl %eax, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L38
.L26:
movl %r14d, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl %r13d, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L39
.L27:
call cudaDeviceSynchronize@PLT
movq 24(%rsp), %r14
movl $0, %ebx
movl $0x00000000, 12(%rsp)
.L28:
movss (%r14,%rbx,4), %xmm0
subss .LC4(%rip), %xmm0
andps .LC5(%rip), %xmm0
movss 12(%rsp), %xmm1
call fmaxf@PLT
movss %xmm0, 12(%rsp)
addq $1, %rbx
cmpl %ebx, %ebp
jg .L28
leaq .LC6(%rip), %rsi
leaq _ZSt4cout(%rip), %rbx
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq %rbx, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq %rax, %rdi
leaq .LC7(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl %ebp, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC8(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl %r12d, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC9(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl %r13d, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC10(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
pxor %xmm0, %xmm0
cvtss2sd 12(%rsp), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $2, %ecx
movl %r12d, %eax
cltd
idivl %ecx
movl %eax, %r12d
movl %eax, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl %r13d, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L40
.L29:
movl %r12d, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl %r13d, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L41
.L30:
call cudaDeviceSynchronize@PLT
movq 24(%rsp), %r14
movl $0, %ebx
.L31:
movss (%r14,%rbx,4), %xmm0
subss .LC4(%rip), %xmm0
andps .LC5(%rip), %xmm0
movss 12(%rsp), %xmm1
call fmaxf@PLT
movss %xmm0, 12(%rsp)
addq $1, %rbx
cmpl %ebx, %ebp
jg .L31
leaq .LC11(%rip), %rsi
leaq _ZSt4cout(%rip), %rbx
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq %rbx, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq %rax, %rdi
leaq .LC7(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl %ebp, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC8(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl %r12d, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC9(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl %r13d, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC10(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
pxor %xmm0, %xmm0
cvtss2sd 12(%rsp), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movl $0, %eax
.L21:
movq 56(%rsp), %rdx
subq %fs:40, %rdx
jne .L42
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L37:
.cfi_restore_state
leaq .LC1(%rip), %rsi
leaq _ZSt4cerr(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movq (%rbx), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
leaq .LC2(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $1, %eax
jmp .L21
.L33:
leaq .LC3(%rip), %rsi
leaq _ZSt4cerr(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $1, %eax
jmp .L21
.L38:
movl %ebx, %edx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z35__device_stub__Z12createArraysPfS_iPfS_i
jmp .L26
.L39:
movl %ebp, %edx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z27__device_stub__Z5func1PfS_iPfS_i
jmp .L27
.L40:
movl %ebp, %edx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z35__device_stub__Z12createArraysPfS_iPfS_i
jmp .L29
.L41:
movl %ebp, %edx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z27__device_stub__Z5func1PfS_iPfS_i
jmp .L30
.L42:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3950:
.size main, .-main
.section .rodata.str1.1
.LC12:
.string "_Z12createArraysPfS_i"
.LC13:
.string "_Z5func1PfS_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3980:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC12(%rip), %rdx
movq %rdx, %rcx
leaq _Z12createArraysPfS_i(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC13(%rip), %rdx
movq %rdx, %rcx
leaq _Z5func1PfS_i(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3980:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC4:
.long 1073741824
.section .rodata.cst16,"aM",@progbits,16
.align 16
.LC5:
.long 2147483647
.long 0
.long 0
.long 0
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "hw5.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z20__device_stub__func1PfS_i # -- Begin function _Z20__device_stub__func1PfS_i
.p2align 4, 0x90
.type _Z20__device_stub__func1PfS_i,@function
_Z20__device_stub__func1PfS_i: # @_Z20__device_stub__func1PfS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z5func1PfS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z20__device_stub__func1PfS_i, .Lfunc_end0-_Z20__device_stub__func1PfS_i
.cfi_endproc
# -- End function
.globl _Z27__device_stub__createArraysPfS_i # -- Begin function _Z27__device_stub__createArraysPfS_i
.p2align 4, 0x90
.type _Z27__device_stub__createArraysPfS_i,@function
_Z27__device_stub__createArraysPfS_i: # @_Z27__device_stub__createArraysPfS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z12createArraysPfS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end1:
.size _Z27__device_stub__createArraysPfS_i, .Lfunc_end1-_Z27__device_stub__createArraysPfS_i
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI2_0:
.long 0xc0000000 # float -2
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0
.LCPI2_1:
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $152, %rsp
.cfi_def_cfa_offset 208
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %rbx
cmpl $2, %edi
jg .LBB2_3
# %bb.1:
movl $_ZSt4cerr, %edi
movl $.L.str, %esi
movl $7, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq (%rbx), %rbx
testq %rbx, %rbx
je .LBB2_15
# %bb.2:
movq %rbx, %rdi
callq strlen
movl $_ZSt4cerr, %edi
movq %rbx, %rsi
movq %rax, %rdx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
jmp .LBB2_16
.LBB2_3:
movq 16(%rbx), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r14
movq 8(%rbx), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r15
testl %r15d, %r15d
jle .LBB2_12
# %bb.4:
testl %r14d, %r14d
jle .LBB2_12
# %bb.5:
movabsq $4294967296, %r12 # imm = 0x100000000
movl %r15d, %ebx
shlq $2, %rbx
leaq 88(%rsp), %rdi
movq %rbx, %rsi
movl $1, %edx
callq hipMallocManaged
leaq 16(%rsp), %rdi
movq %rbx, %rsi
movl $1, %edx
callq hipMallocManaged
leal (%r14,%r15), %eax
decl %eax
cltd
idivl %r14d
# kill: def $eax killed $eax def $rax
movq %rax, 144(%rsp) # 8-byte Spill
leaq (%rax,%r12), %r13
movl %r14d, %ebp
orq %r12, %rbp
movq %r13, %rdi
movl $1, %esi
movq %rbp, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_7
# %bb.6:
movq 88(%rsp), %rax
movq 16(%rsp), %rcx
movq %rax, 80(%rsp)
movq %rcx, 72(%rsp)
movl %r15d, 12(%rsp)
leaq 80(%rsp), %rax
movq %rax, 96(%rsp)
leaq 72(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z12createArraysPfS_i, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_7:
movq %r13, %rdi
movl $1, %esi
movq %rbp, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_9
# %bb.8:
movq 88(%rsp), %rax
movq 16(%rsp), %rcx
movq %rax, 80(%rsp)
movq %rcx, 72(%rsp)
movl %r15d, 12(%rsp)
leaq 80(%rsp), %rax
movq %rax, 96(%rsp)
leaq 72(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z5func1PfS_i, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_9:
callq hipDeviceSynchronize
testl %r15d, %r15d
jle .LBB2_21
# %bb.10: # %.lr.ph
movq 16(%rsp), %rax
movl %r15d, %ecx
xorps %xmm2, %xmm2
xorl %edx, %edx
movss .LCPI2_0(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
movaps .LCPI2_1(%rip), %xmm1 # xmm1 = [NaN,NaN,NaN,NaN]
movaps %xmm2, %xmm5
.p2align 4, 0x90
.LBB2_11: # =>This Inner Loop Header: Depth=1
movss (%rax,%rdx,4), %xmm3 # xmm3 = mem[0],zero,zero,zero
addss %xmm0, %xmm3
andps %xmm1, %xmm3
cmpunordss %xmm5, %xmm5
movaps %xmm5, %xmm4
andps %xmm3, %xmm4
maxss %xmm2, %xmm3
andnps %xmm3, %xmm5
orps %xmm4, %xmm5
incq %rdx
movaps %xmm5, %xmm2
cmpq %rdx, %rcx
jne .LBB2_11
jmp .LBB2_22
.LBB2_12:
movl $_ZSt4cerr, %edi
movl $.L.str.2, %esi
movl $39, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cerr(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cerr+240(%rax), %rbx
testq %rbx, %rbx
je .LBB2_80
# %bb.13: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i85
cmpb $0, 56(%rbx)
jne .LBB2_18
.LBB2_19:
movq %rbx, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
jmp .LBB2_20
.LBB2_15:
movq _ZSt4cerr(%rip), %rax
movq -24(%rax), %rax
leaq _ZSt4cerr(%rax), %rdi
movl _ZSt4cerr+32(%rax), %esi
orl $1, %esi
callq _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate
.LBB2_16: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit
movl $_ZSt4cerr, %edi
movl $.L.str.1, %esi
movl $39, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cerr(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cerr+240(%rax), %rbx
testq %rbx, %rbx
je .LBB2_80
# %bb.17: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%rbx)
je .LBB2_19
.LBB2_18:
movzbl 67(%rbx), %eax
.LBB2_20: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %al, %esi
movl $_ZSt4cerr, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $1, %eax
jmp .LBB2_79
.LBB2_21:
xorps %xmm5, %xmm5
.LBB2_22: # %._crit_edge
movaps %xmm5, 128(%rsp) # 16-byte Spill
movl $_ZSt4cout, %edi
movl $.L.str.3, %esi
movl $7, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %rbp
testq %rbp, %rbp
je .LBB2_80
# %bb.23: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i90
cmpb $0, 56(%rbp)
je .LBB2_25
# %bb.24:
movzbl 67(%rbp), %eax
jmp .LBB2_26
.LBB2_25:
movq %rbp, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbp), %rax
movq %rbp, %rdi
movl $10, %esi
callq *48(%rax)
.LBB2_26: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit93
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %rbp
testq %rbp, %rbp
je .LBB2_80
# %bb.27: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i95
cmpb $0, 56(%rbp)
je .LBB2_29
# %bb.28:
movzbl 67(%rbp), %eax
jmp .LBB2_30
.LBB2_29:
movq %rbp, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbp), %rax
movq %rbp, %rdi
movl $10, %esi
callq *48(%rax)
.LBB2_30: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit98
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq %rax, %rbp
movl $.L.str.4, %esi
movl $12, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq %rbp, %rdi
movl %r15d, %esi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbp
testq %rbp, %rbp
je .LBB2_80
# %bb.31: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i100
cmpb $0, 56(%rbp)
je .LBB2_33
# %bb.32:
movzbl 67(%rbp), %ecx
jmp .LBB2_34
.LBB2_33:
movq %rbp, %rdi
movq %rax, %rbx
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbp), %rax
movq %rbp, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %rbx, %rax
.LBB2_34: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit103
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $_ZSt4cout, %edi
movl $.L.str.5, %esi
movl $19, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl %r14d, %esi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbp
testq %rbp, %rbp
je .LBB2_80
# %bb.35: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i105
cmpb $0, 56(%rbp)
je .LBB2_37
# %bb.36:
movzbl 67(%rbp), %ecx
jmp .LBB2_38
.LBB2_37:
movq %rbp, %rdi
movq %rax, %rbx
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbp), %rax
movq %rbp, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %rbx, %rax
.LBB2_38: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit108
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $_ZSt4cout, %edi
movl $.L.str.6, %esi
movl $18, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movq 144(%rsp), %rsi # 8-byte Reload
# kill: def $esi killed $esi killed $rsi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbp
testq %rbp, %rbp
je .LBB2_80
# %bb.39: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i110
cmpb $0, 56(%rbp)
je .LBB2_41
# %bb.40:
movzbl 67(%rbp), %ecx
jmp .LBB2_42
.LBB2_41:
movq %rbp, %rdi
movq %rax, %rbx
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbp), %rax
movq %rbp, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %rbx, %rax
.LBB2_42: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit113
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $_ZSt4cout, %edi
movl $.L.str.7, %esi
movl $11, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movaps 128(%rsp), %xmm0 # 16-byte Reload
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbp
testq %rbp, %rbp
je .LBB2_80
# %bb.43: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i115
cmpb $0, 56(%rbp)
je .LBB2_45
# %bb.44:
movzbl 67(%rbp), %ecx
jmp .LBB2_46
.LBB2_45:
movq %rbp, %rdi
movq %rax, %rbx
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbp), %rax
movq %rbp, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %rbx, %rax
.LBB2_46: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit118
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
shrl %r14d
orq %r14, %r12
movq %r13, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_48
# %bb.47:
movq 88(%rsp), %rax
movq 16(%rsp), %rcx
movq %rax, 80(%rsp)
movq %rcx, 72(%rsp)
movl %r15d, 12(%rsp)
leaq 80(%rsp), %rax
movq %rax, 96(%rsp)
leaq 72(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z12createArraysPfS_i, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_48:
movq %r13, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_50
# %bb.49:
movq 88(%rsp), %rax
movq 16(%rsp), %rcx
movq %rax, 80(%rsp)
movq %rcx, 72(%rsp)
movl %r15d, 12(%rsp)
leaq 80(%rsp), %rax
movq %rax, 96(%rsp)
leaq 72(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z5func1PfS_i, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_50:
callq hipDeviceSynchronize
testl %r15d, %r15d
jle .LBB2_53
# %bb.51: # %.lr.ph175
movq 16(%rsp), %rax
movl %r15d, %ecx
xorl %edx, %edx
movss .LCPI2_0(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
movaps .LCPI2_1(%rip), %xmm1 # xmm1 = [NaN,NaN,NaN,NaN]
movaps 128(%rsp), %xmm4 # 16-byte Reload
movaps %xmm4, %xmm5
.p2align 4, 0x90
.LBB2_52: # =>This Inner Loop Header: Depth=1
movss (%rax,%rdx,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
addss %xmm0, %xmm2
andps %xmm1, %xmm2
cmpunordss %xmm5, %xmm5
movaps %xmm5, %xmm3
andps %xmm2, %xmm3
maxss %xmm4, %xmm2
andnps %xmm2, %xmm5
orps %xmm3, %xmm5
incq %rdx
movaps %xmm5, %xmm4
cmpq %rdx, %rcx
jne .LBB2_52
jmp .LBB2_54
.LBB2_53:
movaps 128(%rsp), %xmm5 # 16-byte Reload
.LBB2_54: # %._crit_edge176
movaps %xmm5, 128(%rsp) # 16-byte Spill
movl $_ZSt4cout, %edi
movl $.L.str.8, %esi
movl $7, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r12
testq %r12, %r12
je .LBB2_80
# %bb.55: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i120
cmpb $0, 56(%r12)
je .LBB2_57
# %bb.56:
movzbl 67(%r12), %eax
jmp .LBB2_58
.LBB2_57:
movq %r12, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r12), %rax
movq %r12, %rdi
movl $10, %esi
callq *48(%rax)
.LBB2_58: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit123
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r12
testq %r12, %r12
je .LBB2_80
# %bb.59: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i125
cmpb $0, 56(%r12)
je .LBB2_61
# %bb.60:
movzbl 67(%r12), %eax
jmp .LBB2_62
.LBB2_61:
movq %r12, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r12), %rax
movq %r12, %rdi
movl $10, %esi
callq *48(%rax)
.LBB2_62: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit128
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq %rax, %r12
movl $.L.str.4, %esi
movl $12, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq %r12, %rdi
movl %r15d, %esi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r15
testq %r15, %r15
je .LBB2_80
# %bb.63: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i130
cmpb $0, 56(%r15)
je .LBB2_65
# %bb.64:
movzbl 67(%r15), %ecx
jmp .LBB2_66
.LBB2_65:
movq %r15, %rdi
movq %rax, %rbx
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r15), %rax
movq %r15, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %rbx, %rax
.LBB2_66: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit133
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $_ZSt4cout, %edi
movl $.L.str.5, %esi
movl $19, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl %r14d, %esi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r14
testq %r14, %r14
je .LBB2_80
# %bb.67: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i135
cmpb $0, 56(%r14)
je .LBB2_69
# %bb.68:
movzbl 67(%r14), %ecx
jmp .LBB2_70
.LBB2_69:
movq %r14, %rdi
movq %rax, %rbx
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %rbx, %rax
.LBB2_70: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit138
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $_ZSt4cout, %edi
movl $.L.str.6, %esi
movl $18, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movq 144(%rsp), %rsi # 8-byte Reload
# kill: def $esi killed $esi killed $rsi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB2_80
# %bb.71: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i140
cmpb $0, 56(%rbx)
je .LBB2_73
# %bb.72:
movzbl 67(%rbx), %ecx
jmp .LBB2_74
.LBB2_73:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB2_74: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit143
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $_ZSt4cout, %edi
movl $.L.str.7, %esi
movl $11, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movaps 128(%rsp), %xmm0 # 16-byte Reload
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB2_80
# %bb.75: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i145
cmpb $0, 56(%rbx)
je .LBB2_77
# %bb.76:
movzbl 67(%rbx), %ecx
jmp .LBB2_78
.LBB2_77:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB2_78: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit148
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq 88(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
xorl %eax, %eax
.LBB2_79:
addq $152, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB2_80:
.cfi_def_cfa_offset 208
callq _ZSt16__throw_bad_castv
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z5func1PfS_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12createArraysPfS_i, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z5func1PfS_i,@object # @_Z5func1PfS_i
.section .rodata,"a",@progbits
.globl _Z5func1PfS_i
.p2align 3, 0x0
_Z5func1PfS_i:
.quad _Z20__device_stub__func1PfS_i
.size _Z5func1PfS_i, 8
.type _Z12createArraysPfS_i,@object # @_Z12createArraysPfS_i
.globl _Z12createArraysPfS_i
.p2align 3, 0x0
_Z12createArraysPfS_i:
.quad _Z27__device_stub__createArraysPfS_i
.size _Z12createArraysPfS_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Usage: "
.size .L.str, 8
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz " <length of arrays> <num threads/block>"
.size .L.str.1, 40
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Array length and block size must be > 0"
.size .L.str.2, 40
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Phase 1"
.size .L.str.3, 8
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Array size: "
.size .L.str.4, 13
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "Threads per block: "
.size .L.str.5, 20
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "Number of blocks: "
.size .L.str.6, 19
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "Max error: "
.size .L.str.7, 12
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "Phase 2"
.size .L.str.8, 8
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z5func1PfS_i"
.size .L__unnamed_1, 14
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z12createArraysPfS_i"
.size .L__unnamed_2, 22
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z20__device_stub__func1PfS_i
.addrsig_sym _Z27__device_stub__createArraysPfS_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z5func1PfS_i
.addrsig_sym _Z12createArraysPfS_i
.addrsig_sym _ZSt4cerr
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include <cuda.h>
#include <cmath>
float * mat_1d;
float * mat_2d;
float * mat_3d;
float * mat_1;
float * mat_2;
float * mat_3;
const int N = 5;
// initialize function
void init() {
int size = N*N;
// Allocate CPU Memory
mat_1 = (float*) malloc(size*sizeof(float));
mat_2 = (float*) malloc(size*sizeof(float));
mat_3 = (float*) malloc(size*sizeof(float));
// Allocate GPU Memory
cudaMalloc((void**)&mat_1d, size*sizeof(float));
cudaMalloc((void**)&mat_2d, size*sizeof(float));
cudaMalloc((void**)&mat_3d, size*sizeof(float));
// Initialize CPU Memory
for (size_t i = 0; i < size; i++) {
mat_1[i] = 3.2*(i/5) - 1.2*(i%5) + 7.5;
mat_2[i] = 1.6*(i/5) + 5.5*(i%5) - 2.2;
mat_3[i] = 0;
}
// Initialize GPU Memory
cudaMemcpy(mat_1d, mat_1, size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(mat_2d, mat_2, size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(mat_3d, mat_3, size*sizeof(float), cudaMemcpyHostToDevice);
}
void cleanUp() {
free(mat_1);
free(mat_2);
free(mat_3);
cudaFree(mat_1d);
cudaFree(mat_2d);
cudaFree(mat_3d);
}
void printResults(float* m) {
for (size_t i = 0; i < N; i++) {
fprintf(stdout, "[%f, %f, %f, %f, %f] \n", m[i*N], m[i*N+1], m[i*N+2], m[i*N+3], m[i*N+4]);
}
fprintf(stdout, "\n");
}
void printResultsGPU(float* md) {
float* temp = (float*) malloc(N*N*sizeof(float));
cudaMemcpy(temp, md, N*N*sizeof(float), cudaMemcpyDeviceToHost);
printResults(temp);
free(temp);
}
__global__ void mat_add(float* m1, float* m2, float* m3) {
int index = (threadIdx.x * blockDim.x) + threadIdx.y;
m3[index] = m1[index] + m2[index];
}
__global__ void mat_sub(float* m1, float* m2, float* m3) {
int index = (threadIdx.x * blockDim.x) + threadIdx.y;
m3[index] = m1[index] - m2[index];
}
__global__ void mat_mult(float* m1, float* m2, float* m3) {
int index = (threadIdx.x * blockDim.x) + threadIdx.y;
// Initialize the result value
float value = 0.0f;
// Determine the row and column number of the current element
int row = threadIdx.x;
int col = threadIdx.y;
// Loop through and compute the dot product needed for this element
for (size_t i = 0; i < N; i++) {
value += m1[row*N + i] * m2[i*N + col];
}
m3[index] = value;
}
void mat_add_cpu(float* m1, float* m2, float* m3) {
for (size_t i = 0; i < N; i++) {
for (size_t j = 0; j < N; j++) {
m3[N*i+j] = m1[N*i+j] + m2[N*i+j];
}
}
}
void mat_sub_cpu(float* m1, float* m2, float* m3) {
for (size_t i = 0; i < N; i++) {
for (size_t j = 0; j < N; j++) {
m3[N*i+j] = m1[N*i+j] - m2[N*i+j];
}
}
}
void mat_mult_cpu(float* m1, float* m2, float* m3) {
for (size_t i = 0; i < N; i++) {
for (size_t j = 0; j < N; j++) {
float sum = 0;
for (size_t k = 0; k < N; k++) {
float a = m1[N*i + k];
float b = m2[k*N + j];
sum += a * b;
}
m3[N*i+j] = sum;
}
}
}
int main(int argc, char** argv) {
init();
dim3 dimBlock(N,N);
// Add events for profiling
cudaEvent_t beginEvent;
cudaEvent_t endEvent;
cudaEventCreate( &beginEvent );
cudaEventCreate( &endEvent );
float timeValue;
//fprintf(stdout, "GPU: \n");
// Do matrix addition on the GPU and see the result
cudaEventRecord(beginEvent, 0);
mat_add<<<1,dimBlock>>>(mat_1d, mat_2d, mat_3d);
cudaThreadSynchronize();
cudaEventRecord(endEvent, 0);
cudaEventSynchronize(endEvent);
cudaEventElapsedTime(&timeValue, beginEvent, endEvent);
fprintf(stdout, "GPU mat_add kernel time: %f.\n", timeValue);
printResultsGPU(mat_3d);
// Do matrix subtraction on the GPU and see the result
cudaEventRecord(beginEvent, 0);
mat_sub<<<1,dimBlock>>>(mat_1d, mat_2d, mat_3d);
cudaThreadSynchronize();
cudaEventRecord(endEvent, 0);
cudaEventSynchronize(endEvent);
cudaEventElapsedTime(&timeValue, beginEvent, endEvent);
fprintf(stdout, "GPU mat_sub kernel time: %f.\n", timeValue);
printResultsGPU(mat_3d);
// Do matrix multiplication on the GPU and see the result
cudaEventRecord(beginEvent, 0);
mat_mult<<<1,dimBlock>>>(mat_1d, mat_2d, mat_3d);
cudaThreadSynchronize();
cudaEventRecord(endEvent, 0);
cudaEventSynchronize(endEvent);
cudaEventElapsedTime(&timeValue, beginEvent, endEvent);
fprintf(stdout, "GPU mat_mult kernel time: %f.\n", timeValue);
printResultsGPU(mat_3d);
//fprintf(stdout, "CPU: \n");
// Do matrix addition on the CPU and see the result
cudaEventRecord(beginEvent, 0);
mat_add_cpu(mat_1, mat_2, mat_3);
cudaEventRecord(endEvent, 0);
cudaEventSynchronize(endEvent);
cudaEventElapsedTime(&timeValue, beginEvent, endEvent);
fprintf(stdout, "CPU mat_add kernel time: %f.\n", timeValue);
printResults(mat_3);
// Do matrix subtraction on the CPU and see the result
cudaEventRecord(beginEvent, 0);
mat_sub_cpu(mat_1, mat_2, mat_3);
cudaEventRecord(endEvent, 0);
cudaEventSynchronize(endEvent);
cudaEventElapsedTime(&timeValue, beginEvent, endEvent);
fprintf(stdout, "CPU mat_sub kernel time: %f.\n", timeValue);
printResults(mat_3);
// Do matrix multiplication on the CPU and see the result
cudaEventRecord(beginEvent, 0);
mat_mult_cpu(mat_1, mat_2, mat_3);
cudaEventRecord(endEvent, 0);
cudaEventSynchronize(endEvent);
cudaEventElapsedTime(&timeValue, beginEvent, endEvent);
fprintf(stdout, "CPU mat_mult kernel time: %f.\n", timeValue);
printResults(mat_3);
cleanUp();
while (true) {
}
return 0;
} | code for sm_80
Function : _Z8mat_multPfS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R14, SR_TID.X ; /* 0x00000000000e7919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R19, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff137435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R17, SR_TID.Y ; /* 0x0000000000117919 */
/* 0x000e620000002200 */
/*0050*/ LEA R2, R14, R14, 0x2 ; /* 0x0000000e0e027211 */
/* 0x001fcc00078e10ff */
/*0060*/ IMAD.WIDE R4, R17, R19, c[0x0][0x168] ; /* 0x00005a0011047625 */
/* 0x002fc800078e0213 */
/*0070*/ IMAD.WIDE R2, R2, R19, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fe200078e0213 */
/*0080*/ LDG.E R7, [R4.64] ; /* 0x0000000404077981 */
/* 0x000ea8000c1e1900 */
/*0090*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea8000c1e1900 */
/*00a0*/ LDG.E R6, [R4.64+0x14] ; /* 0x0000140404067981 */
/* 0x000ee8000c1e1900 */
/*00b0*/ LDG.E R9, [R2.64+0x4] ; /* 0x0000040402097981 */
/* 0x000ee8000c1e1900 */
/*00c0*/ LDG.E R8, [R4.64+0x28] ; /* 0x0000280404087981 */
/* 0x000f28000c1e1900 */
/*00d0*/ LDG.E R11, [R2.64+0x8] ; /* 0x00000804020b7981 */
/* 0x000f28000c1e1900 */
/*00e0*/ LDG.E R10, [R4.64+0x3c] ; /* 0x00003c04040a7981 */
/* 0x000f68000c1e1900 */
/*00f0*/ LDG.E R13, [R2.64+0xc] ; /* 0x00000c04020d7981 */
/* 0x000f68000c1e1900 */
/*0100*/ LDG.E R12, [R4.64+0x50] ; /* 0x00005004040c7981 */
/* 0x000f68000c1e1900 */
/*0110*/ LDG.E R15, [R2.64+0x10] ; /* 0x00001004020f7981 */
/* 0x000f62000c1e1900 */
/*0120*/ FFMA R0, R0, R7, RZ ; /* 0x0000000700007223 */
/* 0x004fc800000000ff */
/*0130*/ FFMA R0, R9, R6, R0 ; /* 0x0000000609007223 */
/* 0x008fe40000000000 */
/*0140*/ IMAD R6, R14, c[0x0][0x0], R17 ; /* 0x000000000e067a24 */
/* 0x000fc800078e0211 */
/*0150*/ IMAD.WIDE R6, R6, R19, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc800078e0213 */
/*0160*/ FFMA R0, R11, R8, R0 ; /* 0x000000080b007223 */
/* 0x010fc80000000000 */
/*0170*/ FFMA R0, R13, R10, R0 ; /* 0x0000000a0d007223 */
/* 0x020fc80000000000 */
/*0180*/ FFMA R15, R15, R12, R0 ; /* 0x0000000c0f0f7223 */
/* 0x000fca0000000000 */
/*0190*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */
/* 0x000fe2000c101904 */
/*01a0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01b0*/ BRA 0x1b0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z7mat_subPfS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_TID.Y ; /* 0x0000000000037919 */
/* 0x000e240000002200 */
/*0050*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0060*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x000fc800078e0207 */
/*0070*/ IMAD.WIDE R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0207 */
/*0080*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0090*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*00a0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc800078e0207 */
/*00b0*/ FADD R9, R2, -R5 ; /* 0x8000000502097221 */
/* 0x004fca0000000000 */
/*00c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00e0*/ BRA 0xe0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z7mat_addPfS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_TID.Y ; /* 0x0000000000037919 */
/* 0x000e240000002200 */
/*0050*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0060*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x000fc800078e0207 */
/*0070*/ IMAD.WIDE R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0207 */
/*0080*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0090*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*00a0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc800078e0207 */
/*00b0*/ FADD R9, R2, R5 ; /* 0x0000000502097221 */
/* 0x004fca0000000000 */
/*00c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00e0*/ BRA 0xe0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <cuda.h>
#include <cmath>
float * mat_1d;
float * mat_2d;
float * mat_3d;
float * mat_1;
float * mat_2;
float * mat_3;
const int N = 5;
// initialize function
void init() {
int size = N*N;
// Allocate CPU Memory
mat_1 = (float*) malloc(size*sizeof(float));
mat_2 = (float*) malloc(size*sizeof(float));
mat_3 = (float*) malloc(size*sizeof(float));
// Allocate GPU Memory
cudaMalloc((void**)&mat_1d, size*sizeof(float));
cudaMalloc((void**)&mat_2d, size*sizeof(float));
cudaMalloc((void**)&mat_3d, size*sizeof(float));
// Initialize CPU Memory
for (size_t i = 0; i < size; i++) {
mat_1[i] = 3.2*(i/5) - 1.2*(i%5) + 7.5;
mat_2[i] = 1.6*(i/5) + 5.5*(i%5) - 2.2;
mat_3[i] = 0;
}
// Initialize GPU Memory
cudaMemcpy(mat_1d, mat_1, size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(mat_2d, mat_2, size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(mat_3d, mat_3, size*sizeof(float), cudaMemcpyHostToDevice);
}
void cleanUp() {
free(mat_1);
free(mat_2);
free(mat_3);
cudaFree(mat_1d);
cudaFree(mat_2d);
cudaFree(mat_3d);
}
void printResults(float* m) {
for (size_t i = 0; i < N; i++) {
fprintf(stdout, "[%f, %f, %f, %f, %f] \n", m[i*N], m[i*N+1], m[i*N+2], m[i*N+3], m[i*N+4]);
}
fprintf(stdout, "\n");
}
void printResultsGPU(float* md) {
float* temp = (float*) malloc(N*N*sizeof(float));
cudaMemcpy(temp, md, N*N*sizeof(float), cudaMemcpyDeviceToHost);
printResults(temp);
free(temp);
}
__global__ void mat_add(float* m1, float* m2, float* m3) {
int index = (threadIdx.x * blockDim.x) + threadIdx.y;
m3[index] = m1[index] + m2[index];
}
__global__ void mat_sub(float* m1, float* m2, float* m3) {
int index = (threadIdx.x * blockDim.x) + threadIdx.y;
m3[index] = m1[index] - m2[index];
}
__global__ void mat_mult(float* m1, float* m2, float* m3) {
int index = (threadIdx.x * blockDim.x) + threadIdx.y;
// Initialize the result value
float value = 0.0f;
// Determine the row and column number of the current element
int row = threadIdx.x;
int col = threadIdx.y;
// Loop through and compute the dot product needed for this element
for (size_t i = 0; i < N; i++) {
value += m1[row*N + i] * m2[i*N + col];
}
m3[index] = value;
}
void mat_add_cpu(float* m1, float* m2, float* m3) {
for (size_t i = 0; i < N; i++) {
for (size_t j = 0; j < N; j++) {
m3[N*i+j] = m1[N*i+j] + m2[N*i+j];
}
}
}
void mat_sub_cpu(float* m1, float* m2, float* m3) {
for (size_t i = 0; i < N; i++) {
for (size_t j = 0; j < N; j++) {
m3[N*i+j] = m1[N*i+j] - m2[N*i+j];
}
}
}
void mat_mult_cpu(float* m1, float* m2, float* m3) {
for (size_t i = 0; i < N; i++) {
for (size_t j = 0; j < N; j++) {
float sum = 0;
for (size_t k = 0; k < N; k++) {
float a = m1[N*i + k];
float b = m2[k*N + j];
sum += a * b;
}
m3[N*i+j] = sum;
}
}
}
int main(int argc, char** argv) {
init();
dim3 dimBlock(N,N);
// Add events for profiling
cudaEvent_t beginEvent;
cudaEvent_t endEvent;
cudaEventCreate( &beginEvent );
cudaEventCreate( &endEvent );
float timeValue;
//fprintf(stdout, "GPU: \n");
// Do matrix addition on the GPU and see the result
cudaEventRecord(beginEvent, 0);
mat_add<<<1,dimBlock>>>(mat_1d, mat_2d, mat_3d);
cudaThreadSynchronize();
cudaEventRecord(endEvent, 0);
cudaEventSynchronize(endEvent);
cudaEventElapsedTime(&timeValue, beginEvent, endEvent);
fprintf(stdout, "GPU mat_add kernel time: %f.\n", timeValue);
printResultsGPU(mat_3d);
// Do matrix subtraction on the GPU and see the result
cudaEventRecord(beginEvent, 0);
mat_sub<<<1,dimBlock>>>(mat_1d, mat_2d, mat_3d);
cudaThreadSynchronize();
cudaEventRecord(endEvent, 0);
cudaEventSynchronize(endEvent);
cudaEventElapsedTime(&timeValue, beginEvent, endEvent);
fprintf(stdout, "GPU mat_sub kernel time: %f.\n", timeValue);
printResultsGPU(mat_3d);
// Do matrix multiplication on the GPU and see the result
cudaEventRecord(beginEvent, 0);
mat_mult<<<1,dimBlock>>>(mat_1d, mat_2d, mat_3d);
cudaThreadSynchronize();
cudaEventRecord(endEvent, 0);
cudaEventSynchronize(endEvent);
cudaEventElapsedTime(&timeValue, beginEvent, endEvent);
fprintf(stdout, "GPU mat_mult kernel time: %f.\n", timeValue);
printResultsGPU(mat_3d);
//fprintf(stdout, "CPU: \n");
// Do matrix addition on the CPU and see the result
cudaEventRecord(beginEvent, 0);
mat_add_cpu(mat_1, mat_2, mat_3);
cudaEventRecord(endEvent, 0);
cudaEventSynchronize(endEvent);
cudaEventElapsedTime(&timeValue, beginEvent, endEvent);
fprintf(stdout, "CPU mat_add kernel time: %f.\n", timeValue);
printResults(mat_3);
// Do matrix subtraction on the CPU and see the result
cudaEventRecord(beginEvent, 0);
mat_sub_cpu(mat_1, mat_2, mat_3);
cudaEventRecord(endEvent, 0);
cudaEventSynchronize(endEvent);
cudaEventElapsedTime(&timeValue, beginEvent, endEvent);
fprintf(stdout, "CPU mat_sub kernel time: %f.\n", timeValue);
printResults(mat_3);
// Do matrix multiplication on the CPU and see the result
cudaEventRecord(beginEvent, 0);
mat_mult_cpu(mat_1, mat_2, mat_3);
cudaEventRecord(endEvent, 0);
cudaEventSynchronize(endEvent);
cudaEventElapsedTime(&timeValue, beginEvent, endEvent);
fprintf(stdout, "CPU mat_mult kernel time: %f.\n", timeValue);
printResults(mat_3);
cleanUp();
while (true) {
}
return 0;
} | .file "tmpxft_00056fe9_00000000-6_matrix_math.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2067:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2067:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z4initv
.type _Z4initv, @function
_Z4initv:
.LFB2057:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl $100, %edi
call malloc@PLT
movq %rax, mat_1(%rip)
movl $100, %edi
call malloc@PLT
movq %rax, mat_2(%rip)
movl $100, %edi
call malloc@PLT
movq %rax, mat_3(%rip)
movl $100, %esi
leaq mat_1d(%rip), %rdi
call cudaMalloc@PLT
movl $100, %esi
leaq mat_2d(%rip), %rdi
call cudaMalloc@PLT
movl $100, %esi
leaq mat_3d(%rip), %rdi
call cudaMalloc@PLT
movl $0, %ecx
movabsq $-3689348814741910323, %rsi
movsd .LC0(%rip), %xmm8
movsd .LC1(%rip), %xmm7
movsd .LC2(%rip), %xmm6
movsd .LC3(%rip), %xmm5
movsd .LC4(%rip), %xmm4
movsd .LC5(%rip), %xmm3
jmp .L8
.L6:
movq %rax, %rdx
shrq %rdx
andl $1, %eax
orq %rax, %rdx
pxor %xmm2, %xmm2
cvtsi2sdq %rdx, %xmm2
addsd %xmm2, %xmm2
.L7:
movapd %xmm0, %xmm1
mulsd %xmm8, %xmm1
movapd %xmm2, %xmm9
mulsd %xmm7, %xmm9
subsd %xmm9, %xmm1
addsd %xmm6, %xmm1
cvtsd2ss %xmm1, %xmm1
movq mat_1(%rip), %rax
movss %xmm1, (%rax,%rcx,4)
mulsd %xmm5, %xmm0
mulsd %xmm4, %xmm2
addsd %xmm2, %xmm0
subsd %xmm3, %xmm0
cvtsd2ss %xmm0, %xmm0
movq mat_2(%rip), %rax
movss %xmm0, (%rax,%rcx,4)
movq mat_3(%rip), %rax
movl $0x00000000, (%rax,%rcx,4)
addq $1, %rcx
cmpq $25, %rcx
je .L11
.L8:
movq %rcx, %rax
mulq %rsi
shrq $2, %rdx
pxor %xmm0, %xmm0
cvtsi2sdq %rdx, %xmm0
movq %rcx, %rax
mulq %rsi
movq %rdx, %rax
shrq $2, %rax
andq $-4, %rdx
addq %rax, %rdx
movq %rcx, %rax
subq %rdx, %rax
js .L6
pxor %xmm2, %xmm2
cvtsi2sdq %rax, %xmm2
jmp .L7
.L11:
movl $1, %ecx
movl $100, %edx
movq mat_1(%rip), %rsi
movq mat_1d(%rip), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $100, %edx
movq mat_2(%rip), %rsi
movq mat_2d(%rip), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $100, %edx
movq mat_3(%rip), %rsi
movq mat_3d(%rip), %rdi
call cudaMemcpy@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size _Z4initv, .-_Z4initv
.globl _Z7cleanUpv
.type _Z7cleanUpv, @function
_Z7cleanUpv:
.LFB2058:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq mat_1(%rip), %rdi
call free@PLT
movq mat_2(%rip), %rdi
call free@PLT
movq mat_3(%rip), %rdi
call free@PLT
movq mat_1d(%rip), %rdi
call cudaFree@PLT
movq mat_2d(%rip), %rdi
call cudaFree@PLT
movq mat_3d(%rip), %rdi
call cudaFree@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2058:
.size _Z7cleanUpv, .-_Z7cleanUpv
.section .rodata.str1.1,"aMS",@progbits,1
.LC7:
.string "[%f, %f, %f, %f, %f] \n"
.LC8:
.string "\n"
.text
.globl _Z12printResultsPf
.type _Z12printResultsPf, @function
_Z12printResultsPf:
.LFB2059:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movq %rdi, %rbx
leaq 100(%rdi), %r12
leaq .LC7(%rip), %rbp
.L15:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
pxor %xmm4, %xmm4
cvtss2sd 16(%rbx), %xmm4
pxor %xmm3, %xmm3
cvtss2sd 12(%rbx), %xmm3
pxor %xmm2, %xmm2
cvtss2sd 8(%rbx), %xmm2
pxor %xmm1, %xmm1
cvtss2sd 4(%rbx), %xmm1
movq %rbp, %rdx
movl $2, %esi
movq stdout(%rip), %rdi
movl $5, %eax
call __fprintf_chk@PLT
addq $20, %rbx
cmpq %r12, %rbx
jne .L15
leaq .LC8(%rip), %rdx
movl $2, %esi
movq stdout(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _Z12printResultsPf, .-_Z12printResultsPf
.globl _Z15printResultsGPUPf
.type _Z15printResultsGPUPf, @function
_Z15printResultsGPUPf:
.LFB2060:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movq %rdi, %rbp
movl $100, %edi
call malloc@PLT
movq %rax, %rbx
movl $2, %ecx
movl $100, %edx
movq %rbp, %rsi
movq %rax, %rdi
call cudaMemcpy@PLT
movq %rbx, %rdi
call _Z12printResultsPf
movq %rbx, %rdi
call free@PLT
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _Z15printResultsGPUPf, .-_Z15printResultsGPUPf
.globl _Z11mat_add_cpuPfS_S_
.type _Z11mat_add_cpuPfS_S_, @function
_Z11mat_add_cpuPfS_S_:
.LFB2061:
.cfi_startproc
endbr64
movl $20, %ecx
.L21:
leaq -20(%rcx), %rax
.L22:
movss (%rdi,%rax), %xmm0
addss (%rsi,%rax), %xmm0
movss %xmm0, (%rdx,%rax)
addq $4, %rax
cmpq %rcx, %rax
jne .L22
addq $20, %rcx
cmpq $120, %rcx
jne .L21
ret
.cfi_endproc
.LFE2061:
.size _Z11mat_add_cpuPfS_S_, .-_Z11mat_add_cpuPfS_S_
.globl _Z11mat_sub_cpuPfS_S_
.type _Z11mat_sub_cpuPfS_S_, @function
_Z11mat_sub_cpuPfS_S_:
.LFB2062:
.cfi_startproc
endbr64
movl $20, %ecx
.L26:
leaq -20(%rcx), %rax
.L27:
movss (%rdi,%rax), %xmm0
subss (%rsi,%rax), %xmm0
movss %xmm0, (%rdx,%rax)
addq $4, %rax
cmpq %rcx, %rax
jne .L27
addq $20, %rcx
cmpq $120, %rcx
jne .L26
ret
.cfi_endproc
.LFE2062:
.size _Z11mat_sub_cpuPfS_S_, .-_Z11mat_sub_cpuPfS_S_
.globl _Z12mat_mult_cpuPfS_S_
.type _Z12mat_mult_cpuPfS_S_, @function
_Z12mat_mult_cpuPfS_S_:
.LFB2063:
.cfi_startproc
endbr64
leaq 100(%rdx), %r10
.L31:
leaq 100(%rsi), %r8
movl $0, %r9d
.L35:
leaq -100(%r8), %rax
movq %rdi, %rcx
pxor %xmm1, %xmm1
.L32:
movss (%rcx), %xmm0
mulss (%rax), %xmm0
addss %xmm0, %xmm1
addq $4, %rcx
addq $20, %rax
cmpq %r8, %rax
jne .L32
movss %xmm1, (%rdx,%r9,4)
addq $1, %r9
addq $4, %r8
cmpq $5, %r9
jne .L35
addq $20, %rdx
addq $20, %rdi
cmpq %r10, %rdx
jne .L31
ret
.cfi_endproc
.LFE2063:
.size _Z12mat_mult_cpuPfS_S_, .-_Z12mat_mult_cpuPfS_S_
.globl _Z30__device_stub__Z7mat_addPfS_S_PfS_S_
.type _Z30__device_stub__Z7mat_addPfS_S_PfS_S_, @function
_Z30__device_stub__Z7mat_addPfS_S_PfS_S_:
.LFB2089:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L41
.L37:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L42
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L41:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z7mat_addPfS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L37
.L42:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2089:
.size _Z30__device_stub__Z7mat_addPfS_S_PfS_S_, .-_Z30__device_stub__Z7mat_addPfS_S_PfS_S_
.globl _Z7mat_addPfS_S_
.type _Z7mat_addPfS_S_, @function
_Z7mat_addPfS_S_:
.LFB2090:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z30__device_stub__Z7mat_addPfS_S_PfS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2090:
.size _Z7mat_addPfS_S_, .-_Z7mat_addPfS_S_
.globl _Z30__device_stub__Z7mat_subPfS_S_PfS_S_
.type _Z30__device_stub__Z7mat_subPfS_S_PfS_S_, @function
_Z30__device_stub__Z7mat_subPfS_S_PfS_S_:
.LFB2091:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L49
.L45:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L50
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L49:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z7mat_subPfS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L45
.L50:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2091:
.size _Z30__device_stub__Z7mat_subPfS_S_PfS_S_, .-_Z30__device_stub__Z7mat_subPfS_S_PfS_S_
.globl _Z7mat_subPfS_S_
.type _Z7mat_subPfS_S_, @function
_Z7mat_subPfS_S_:
.LFB2092:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z30__device_stub__Z7mat_subPfS_S_PfS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2092:
.size _Z7mat_subPfS_S_, .-_Z7mat_subPfS_S_
.globl _Z31__device_stub__Z8mat_multPfS_S_PfS_S_
.type _Z31__device_stub__Z8mat_multPfS_S_PfS_S_, @function
_Z31__device_stub__Z8mat_multPfS_S_PfS_S_:
.LFB2093:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L57
.L53:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L58
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L57:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z8mat_multPfS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L53
.L58:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2093:
.size _Z31__device_stub__Z8mat_multPfS_S_PfS_S_, .-_Z31__device_stub__Z8mat_multPfS_S_PfS_S_
.globl _Z8mat_multPfS_S_
.type _Z8mat_multPfS_S_, @function
_Z8mat_multPfS_S_:
.LFB2094:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z8mat_multPfS_S_PfS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2094:
.size _Z8mat_multPfS_S_, .-_Z8mat_multPfS_S_
.section .rodata.str1.1
.LC9:
.string "GPU mat_add kernel time: %f.\n"
.LC10:
.string "GPU mat_sub kernel time: %f.\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC11:
.string "GPU mat_mult kernel time: %f.\n"
.section .rodata.str1.1
.LC12:
.string "CPU mat_add kernel time: %f.\n"
.LC13:
.string "CPU mat_sub kernel time: %f.\n"
.section .rodata.str1.8
.align 8
.LC14:
.string "CPU mat_mult kernel time: %f.\n"
.text
.globl main
.type main, @function
main:
.LFB2064:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $64, %rsp
.cfi_def_cfa_offset 80
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
call _Z4initv
movl $5, 32(%rsp)
movl $5, 36(%rsp)
movl $1, 40(%rsp)
leaq 16(%rsp), %rdi
call cudaEventCreate@PLT
leaq 24(%rsp), %rdi
call cudaEventCreate@PLT
movl $0, %esi
movq 16(%rsp), %rdi
call cudaEventRecord@PLT
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl 40(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 32(%rsp), %rdx
movq 44(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L68
.L62:
call cudaThreadSynchronize@PLT
movl $0, %esi
movq 24(%rsp), %rdi
call cudaEventRecord@PLT
movq 24(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 12(%rsp), %rdi
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
call cudaEventElapsedTime@PLT
pxor %xmm0, %xmm0
cvtss2sd 12(%rsp), %xmm0
leaq .LC9(%rip), %rdx
movl $2, %esi
movq stdout(%rip), %rdi
movl $1, %eax
call __fprintf_chk@PLT
movq mat_3d(%rip), %rdi
call _Z15printResultsGPUPf
movl $0, %esi
movq 16(%rsp), %rdi
call cudaEventRecord@PLT
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl 40(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 32(%rsp), %rdx
movq 44(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L69
.L63:
call cudaThreadSynchronize@PLT
movl $0, %esi
movq 24(%rsp), %rdi
call cudaEventRecord@PLT
movq 24(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 12(%rsp), %rdi
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
call cudaEventElapsedTime@PLT
pxor %xmm0, %xmm0
cvtss2sd 12(%rsp), %xmm0
leaq .LC10(%rip), %rdx
movl $2, %esi
movq stdout(%rip), %rdi
movl $1, %eax
call __fprintf_chk@PLT
movq mat_3d(%rip), %rdi
call _Z15printResultsGPUPf
movl $0, %esi
movq 16(%rsp), %rdi
call cudaEventRecord@PLT
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl 40(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 32(%rsp), %rdx
movq 44(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L70
.L64:
call cudaThreadSynchronize@PLT
movl $0, %esi
movq 24(%rsp), %rdi
call cudaEventRecord@PLT
movq 24(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 12(%rsp), %rbx
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq %rbx, %rdi
call cudaEventElapsedTime@PLT
pxor %xmm0, %xmm0
cvtss2sd 12(%rsp), %xmm0
leaq .LC11(%rip), %rdx
movl $2, %esi
movq stdout(%rip), %rdi
movl $1, %eax
call __fprintf_chk@PLT
movq mat_3d(%rip), %rdi
call _Z15printResultsGPUPf
movl $0, %esi
movq 16(%rsp), %rdi
call cudaEventRecord@PLT
movq mat_3(%rip), %rdx
movq mat_2(%rip), %rsi
movq mat_1(%rip), %rdi
call _Z11mat_add_cpuPfS_S_
movl $0, %esi
movq 24(%rsp), %rdi
call cudaEventRecord@PLT
movq 24(%rsp), %rdi
call cudaEventSynchronize@PLT
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq %rbx, %rdi
call cudaEventElapsedTime@PLT
pxor %xmm0, %xmm0
cvtss2sd 12(%rsp), %xmm0
leaq .LC12(%rip), %rdx
movl $2, %esi
movq stdout(%rip), %rdi
movl $1, %eax
call __fprintf_chk@PLT
movq mat_3(%rip), %rdi
call _Z12printResultsPf
movl $0, %esi
movq 16(%rsp), %rdi
call cudaEventRecord@PLT
movq mat_3(%rip), %rdx
movq mat_2(%rip), %rsi
movq mat_1(%rip), %rdi
call _Z11mat_sub_cpuPfS_S_
movl $0, %esi
movq 24(%rsp), %rdi
call cudaEventRecord@PLT
movq 24(%rsp), %rdi
call cudaEventSynchronize@PLT
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq %rbx, %rdi
call cudaEventElapsedTime@PLT
pxor %xmm0, %xmm0
cvtss2sd 12(%rsp), %xmm0
leaq .LC13(%rip), %rdx
movl $2, %esi
movq stdout(%rip), %rdi
movl $1, %eax
call __fprintf_chk@PLT
movq mat_3(%rip), %rdi
call _Z12printResultsPf
movl $0, %esi
movq 16(%rsp), %rdi
call cudaEventRecord@PLT
movq mat_3(%rip), %rdx
movq mat_2(%rip), %rsi
movq mat_1(%rip), %rdi
call _Z12mat_mult_cpuPfS_S_
movl $0, %esi
movq 24(%rsp), %rdi
call cudaEventRecord@PLT
movq 24(%rsp), %rdi
call cudaEventSynchronize@PLT
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq %rbx, %rdi
call cudaEventElapsedTime@PLT
pxor %xmm0, %xmm0
cvtss2sd 12(%rsp), %xmm0
leaq .LC14(%rip), %rdx
movl $2, %esi
movq stdout(%rip), %rdi
movl $1, %eax
call __fprintf_chk@PLT
movq mat_3(%rip), %rdi
call _Z12printResultsPf
call _Z7cleanUpv
.L65:
jmp .L65
.L68:
movq mat_3d(%rip), %rdx
movq mat_2d(%rip), %rsi
movq mat_1d(%rip), %rdi
call _Z30__device_stub__Z7mat_addPfS_S_PfS_S_
jmp .L62
.L69:
movq mat_3d(%rip), %rdx
movq mat_2d(%rip), %rsi
movq mat_1d(%rip), %rdi
call _Z30__device_stub__Z7mat_subPfS_S_PfS_S_
jmp .L63
.L70:
movq mat_3d(%rip), %rdx
movq mat_2d(%rip), %rsi
movq mat_1d(%rip), %rdi
call _Z31__device_stub__Z8mat_multPfS_S_PfS_S_
jmp .L64
.cfi_endproc
.LFE2064:
.size main, .-main
.section .rodata.str1.1
.LC15:
.string "_Z8mat_multPfS_S_"
.LC16:
.string "_Z7mat_subPfS_S_"
.LC17:
.string "_Z7mat_addPfS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2096:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC15(%rip), %rdx
movq %rdx, %rcx
leaq _Z8mat_multPfS_S_(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC16(%rip), %rdx
movq %rdx, %rcx
leaq _Z7mat_subPfS_S_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC17(%rip), %rdx
movq %rdx, %rcx
leaq _Z7mat_addPfS_S_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2096:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl mat_3
.bss
.align 8
.type mat_3, @object
.size mat_3, 8
mat_3:
.zero 8
.globl mat_2
.align 8
.type mat_2, @object
.size mat_2, 8
mat_2:
.zero 8
.globl mat_1
.align 8
.type mat_1, @object
.size mat_1, 8
mat_1:
.zero 8
.globl mat_3d
.align 8
.type mat_3d, @object
.size mat_3d, 8
mat_3d:
.zero 8
.globl mat_2d
.align 8
.type mat_2d, @object
.size mat_2d, 8
mat_2d:
.zero 8
.globl mat_1d
.align 8
.type mat_1d, @object
.size mat_1d, 8
mat_1d:
.zero 8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC0:
.long -1717986918
.long 1074370969
.align 8
.LC1:
.long 858993459
.long 1072902963
.align 8
.LC2:
.long 0
.long 1075707904
.align 8
.LC3:
.long -1717986918
.long 1073322393
.align 8
.LC4:
.long 0
.long 1075183616
.align 8
.LC5:
.long -1717986918
.long 1073846681
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <cuda.h>
#include <cmath>
float * mat_1d;
float * mat_2d;
float * mat_3d;
float * mat_1;
float * mat_2;
float * mat_3;
const int N = 5;
// initialize function
void init() {
int size = N*N;
// Allocate CPU Memory
mat_1 = (float*) malloc(size*sizeof(float));
mat_2 = (float*) malloc(size*sizeof(float));
mat_3 = (float*) malloc(size*sizeof(float));
// Allocate GPU Memory
cudaMalloc((void**)&mat_1d, size*sizeof(float));
cudaMalloc((void**)&mat_2d, size*sizeof(float));
cudaMalloc((void**)&mat_3d, size*sizeof(float));
// Initialize CPU Memory
for (size_t i = 0; i < size; i++) {
mat_1[i] = 3.2*(i/5) - 1.2*(i%5) + 7.5;
mat_2[i] = 1.6*(i/5) + 5.5*(i%5) - 2.2;
mat_3[i] = 0;
}
// Initialize GPU Memory
cudaMemcpy(mat_1d, mat_1, size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(mat_2d, mat_2, size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(mat_3d, mat_3, size*sizeof(float), cudaMemcpyHostToDevice);
}
void cleanUp() {
free(mat_1);
free(mat_2);
free(mat_3);
cudaFree(mat_1d);
cudaFree(mat_2d);
cudaFree(mat_3d);
}
void printResults(float* m) {
for (size_t i = 0; i < N; i++) {
fprintf(stdout, "[%f, %f, %f, %f, %f] \n", m[i*N], m[i*N+1], m[i*N+2], m[i*N+3], m[i*N+4]);
}
fprintf(stdout, "\n");
}
void printResultsGPU(float* md) {
float* temp = (float*) malloc(N*N*sizeof(float));
cudaMemcpy(temp, md, N*N*sizeof(float), cudaMemcpyDeviceToHost);
printResults(temp);
free(temp);
}
__global__ void mat_add(float* m1, float* m2, float* m3) {
int index = (threadIdx.x * blockDim.x) + threadIdx.y;
m3[index] = m1[index] + m2[index];
}
__global__ void mat_sub(float* m1, float* m2, float* m3) {
int index = (threadIdx.x * blockDim.x) + threadIdx.y;
m3[index] = m1[index] - m2[index];
}
// m3 = m1 * m2 for N x N row-major matrices, one output element per thread.
// Expects a single square (N, N) block: threadIdx.x is the row and
// threadIdx.y the column of the element this thread computes.
__global__ void mat_mult(float* m1, float* m2, float* m3) {
    // Flat output index; blockDim.x doubles as the row stride because the
    // launch configuration uses a square block.
    int index = (threadIdx.x * blockDim.x) + threadIdx.y;
    // Initialize the result value
    float value = 0.0f;
    // Determine the row and column number of the current element
    int row = threadIdx.x;
    int col = threadIdx.y;
    // Dot product of row `row` of m1 with column `col` of m2.
    for (size_t i = 0; i < N; i++) {
        value += m1[row*N + i] * m2[i*N + col];
    }
    m3[index] = value;
}
void mat_add_cpu(float* m1, float* m2, float* m3) {
for (size_t i = 0; i < N; i++) {
for (size_t j = 0; j < N; j++) {
m3[N*i+j] = m1[N*i+j] + m2[N*i+j];
}
}
}
void mat_sub_cpu(float* m1, float* m2, float* m3) {
for (size_t i = 0; i < N; i++) {
for (size_t j = 0; j < N; j++) {
m3[N*i+j] = m1[N*i+j] - m2[N*i+j];
}
}
}
void mat_mult_cpu(float* m1, float* m2, float* m3) {
for (size_t i = 0; i < N; i++) {
for (size_t j = 0; j < N; j++) {
float sum = 0;
for (size_t k = 0; k < N; k++) {
float a = m1[N*i + k];
float b = m2[k*N + j];
sum += a * b;
}
m3[N*i+j] = sum;
}
}
}
int main(int argc, char** argv) {
init();
dim3 dimBlock(N,N);
// Add events for profiling
cudaEvent_t beginEvent;
cudaEvent_t endEvent;
cudaEventCreate( &beginEvent );
cudaEventCreate( &endEvent );
float timeValue;
//fprintf(stdout, "GPU: \n");
// Do matrix addition on the GPU and see the result
cudaEventRecord(beginEvent, 0);
mat_add<<<1,dimBlock>>>(mat_1d, mat_2d, mat_3d);
cudaThreadSynchronize();
cudaEventRecord(endEvent, 0);
cudaEventSynchronize(endEvent);
cudaEventElapsedTime(&timeValue, beginEvent, endEvent);
fprintf(stdout, "GPU mat_add kernel time: %f.\n", timeValue);
printResultsGPU(mat_3d);
// Do matrix subtraction on the GPU and see the result
cudaEventRecord(beginEvent, 0);
mat_sub<<<1,dimBlock>>>(mat_1d, mat_2d, mat_3d);
cudaThreadSynchronize();
cudaEventRecord(endEvent, 0);
cudaEventSynchronize(endEvent);
cudaEventElapsedTime(&timeValue, beginEvent, endEvent);
fprintf(stdout, "GPU mat_sub kernel time: %f.\n", timeValue);
printResultsGPU(mat_3d);
// Do matrix multiplication on the GPU and see the result
cudaEventRecord(beginEvent, 0);
mat_mult<<<1,dimBlock>>>(mat_1d, mat_2d, mat_3d);
cudaThreadSynchronize();
cudaEventRecord(endEvent, 0);
cudaEventSynchronize(endEvent);
cudaEventElapsedTime(&timeValue, beginEvent, endEvent);
fprintf(stdout, "GPU mat_mult kernel time: %f.\n", timeValue);
printResultsGPU(mat_3d);
//fprintf(stdout, "CPU: \n");
// Do matrix addition on the CPU and see the result
cudaEventRecord(beginEvent, 0);
mat_add_cpu(mat_1, mat_2, mat_3);
cudaEventRecord(endEvent, 0);
cudaEventSynchronize(endEvent);
cudaEventElapsedTime(&timeValue, beginEvent, endEvent);
fprintf(stdout, "CPU mat_add kernel time: %f.\n", timeValue);
printResults(mat_3);
// Do matrix subtraction on the CPU and see the result
cudaEventRecord(beginEvent, 0);
mat_sub_cpu(mat_1, mat_2, mat_3);
cudaEventRecord(endEvent, 0);
cudaEventSynchronize(endEvent);
cudaEventElapsedTime(&timeValue, beginEvent, endEvent);
fprintf(stdout, "CPU mat_sub kernel time: %f.\n", timeValue);
printResults(mat_3);
// Do matrix multiplication on the CPU and see the result
cudaEventRecord(beginEvent, 0);
mat_mult_cpu(mat_1, mat_2, mat_3);
cudaEventRecord(endEvent, 0);
cudaEventSynchronize(endEvent);
cudaEventElapsedTime(&timeValue, beginEvent, endEvent);
fprintf(stdout, "CPU mat_mult kernel time: %f.\n", timeValue);
printResults(mat_3);
cleanUp();
while (true) {
}
return 0;
} | #include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
float * mat_1d;
float * mat_2d;
float * mat_3d;
float * mat_1;
float * mat_2;
float * mat_3;
const int N = 5;
// initialize function
void init() {
int size = N*N;
// Allocate CPU Memory
mat_1 = (float*) malloc(size*sizeof(float));
mat_2 = (float*) malloc(size*sizeof(float));
mat_3 = (float*) malloc(size*sizeof(float));
// Allocate GPU Memory
hipMalloc((void**)&mat_1d, size*sizeof(float));
hipMalloc((void**)&mat_2d, size*sizeof(float));
hipMalloc((void**)&mat_3d, size*sizeof(float));
// Initialize CPU Memory
for (size_t i = 0; i < size; i++) {
mat_1[i] = 3.2*(i/5) - 1.2*(i%5) + 7.5;
mat_2[i] = 1.6*(i/5) + 5.5*(i%5) - 2.2;
mat_3[i] = 0;
}
// Initialize GPU Memory
hipMemcpy(mat_1d, mat_1, size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(mat_2d, mat_2, size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(mat_3d, mat_3, size*sizeof(float), hipMemcpyHostToDevice);
}
void cleanUp() {
free(mat_1);
free(mat_2);
free(mat_3);
hipFree(mat_1d);
hipFree(mat_2d);
hipFree(mat_3d);
}
void printResults(float* m) {
for (size_t i = 0; i < N; i++) {
fprintf(stdout, "[%f, %f, %f, %f, %f] \n", m[i*N], m[i*N+1], m[i*N+2], m[i*N+3], m[i*N+4]);
}
fprintf(stdout, "\n");
}
void printResultsGPU(float* md) {
float* temp = (float*) malloc(N*N*sizeof(float));
hipMemcpy(temp, md, N*N*sizeof(float), hipMemcpyDeviceToHost);
printResults(temp);
free(temp);
}
__global__ void mat_add(float* m1, float* m2, float* m3) {
int index = (threadIdx.x * blockDim.x) + threadIdx.y;
m3[index] = m1[index] + m2[index];
}
__global__ void mat_sub(float* m1, float* m2, float* m3) {
int index = (threadIdx.x * blockDim.x) + threadIdx.y;
m3[index] = m1[index] - m2[index];
}
// m3 = m1 * m2 for N x N row-major matrices, one output element per thread.
// Expects a single square (N, N) block: threadIdx.x is the row and
// threadIdx.y the column of the element this thread computes.
__global__ void mat_mult(float* m1, float* m2, float* m3) {
    // Flat output index; blockDim.x doubles as the row stride because the
    // launch configuration uses a square block.
    int index = (threadIdx.x * blockDim.x) + threadIdx.y;
    // Initialize the result value
    float value = 0.0f;
    // Determine the row and column number of the current element
    int row = threadIdx.x;
    int col = threadIdx.y;
    // Dot product of row `row` of m1 with column `col` of m2.
    for (size_t i = 0; i < N; i++) {
        value += m1[row*N + i] * m2[i*N + col];
    }
    m3[index] = value;
}
void mat_add_cpu(float* m1, float* m2, float* m3) {
for (size_t i = 0; i < N; i++) {
for (size_t j = 0; j < N; j++) {
m3[N*i+j] = m1[N*i+j] + m2[N*i+j];
}
}
}
void mat_sub_cpu(float* m1, float* m2, float* m3) {
for (size_t i = 0; i < N; i++) {
for (size_t j = 0; j < N; j++) {
m3[N*i+j] = m1[N*i+j] - m2[N*i+j];
}
}
}
void mat_mult_cpu(float* m1, float* m2, float* m3) {
for (size_t i = 0; i < N; i++) {
for (size_t j = 0; j < N; j++) {
float sum = 0;
for (size_t k = 0; k < N; k++) {
float a = m1[N*i + k];
float b = m2[k*N + j];
sum += a * b;
}
m3[N*i+j] = sum;
}
}
}
int main(int argc, char** argv) {
init();
dim3 dimBlock(N,N);
// Add events for profiling
hipEvent_t beginEvent;
hipEvent_t endEvent;
hipEventCreate( &beginEvent );
hipEventCreate( &endEvent );
float timeValue;
//fprintf(stdout, "GPU: \n");
// Do matrix addition on the GPU and see the result
hipEventRecord(beginEvent, 0);
mat_add<<<1,dimBlock>>>(mat_1d, mat_2d, mat_3d);
hipDeviceSynchronize();
hipEventRecord(endEvent, 0);
hipEventSynchronize(endEvent);
hipEventElapsedTime(&timeValue, beginEvent, endEvent);
fprintf(stdout, "GPU mat_add kernel time: %f.\n", timeValue);
printResultsGPU(mat_3d);
// Do matrix subtraction on the GPU and see the result
hipEventRecord(beginEvent, 0);
mat_sub<<<1,dimBlock>>>(mat_1d, mat_2d, mat_3d);
hipDeviceSynchronize();
hipEventRecord(endEvent, 0);
hipEventSynchronize(endEvent);
hipEventElapsedTime(&timeValue, beginEvent, endEvent);
fprintf(stdout, "GPU mat_sub kernel time: %f.\n", timeValue);
printResultsGPU(mat_3d);
// Do matrix multiplication on the GPU and see the result
hipEventRecord(beginEvent, 0);
mat_mult<<<1,dimBlock>>>(mat_1d, mat_2d, mat_3d);
hipDeviceSynchronize();
hipEventRecord(endEvent, 0);
hipEventSynchronize(endEvent);
hipEventElapsedTime(&timeValue, beginEvent, endEvent);
fprintf(stdout, "GPU mat_mult kernel time: %f.\n", timeValue);
printResultsGPU(mat_3d);
//fprintf(stdout, "CPU: \n");
// Do matrix addition on the CPU and see the result
hipEventRecord(beginEvent, 0);
mat_add_cpu(mat_1, mat_2, mat_3);
hipEventRecord(endEvent, 0);
hipEventSynchronize(endEvent);
hipEventElapsedTime(&timeValue, beginEvent, endEvent);
fprintf(stdout, "CPU mat_add kernel time: %f.\n", timeValue);
printResults(mat_3);
// Do matrix subtraction on the CPU and see the result
hipEventRecord(beginEvent, 0);
mat_sub_cpu(mat_1, mat_2, mat_3);
hipEventRecord(endEvent, 0);
hipEventSynchronize(endEvent);
hipEventElapsedTime(&timeValue, beginEvent, endEvent);
fprintf(stdout, "CPU mat_sub kernel time: %f.\n", timeValue);
printResults(mat_3);
// Do matrix multiplication on the CPU and see the result
hipEventRecord(beginEvent, 0);
mat_mult_cpu(mat_1, mat_2, mat_3);
hipEventRecord(endEvent, 0);
hipEventSynchronize(endEvent);
hipEventElapsedTime(&timeValue, beginEvent, endEvent);
fprintf(stdout, "CPU mat_mult kernel time: %f.\n", timeValue);
printResults(mat_3);
cleanUp();
while (true) {
}
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
float * mat_1d;
float * mat_2d;
float * mat_3d;
float * mat_1;
float * mat_2;
float * mat_3;
const int N = 5;
// initialize function
// Allocate host and device storage for the N x N matrices and seed the
// host copies with deterministic test data, then mirror them to the device.
// Return codes are unchecked, matching the rest of the file.
void init() {
    const int count = N*N;
    const size_t bytes = count * sizeof(float);
    // Host-side allocations.
    mat_1 = (float*) malloc(bytes);
    mat_2 = (float*) malloc(bytes);
    mat_3 = (float*) malloc(bytes);
    // Device-side allocations.
    hipMalloc((void**)&mat_1d, bytes);
    hipMalloc((void**)&mat_2d, bytes);
    hipMalloc((void**)&mat_3d, bytes);
    // Fill the host matrices; i/5 is the row index, i%5 the column index.
    for (size_t i = 0; i < count; i++) {
        mat_1[i] = 3.2*(i/5) - 1.2*(i%5) + 7.5;
        mat_2[i] = 1.6*(i/5) + 5.5*(i%5) - 2.2;
        mat_3[i] = 0;
    }
    // Copy the freshly initialized host data onto the device.
    hipMemcpy(mat_1d, mat_1, bytes, hipMemcpyHostToDevice);
    hipMemcpy(mat_2d, mat_2, bytes, hipMemcpyHostToDevice);
    hipMemcpy(mat_3d, mat_3, bytes, hipMemcpyHostToDevice);
}
// Release every host and device buffer allocated by init().
void cleanUp() {
    hipFree(mat_1d);
    hipFree(mat_2d);
    hipFree(mat_3d);
    free(mat_1);
    free(mat_2);
    free(mat_3);
}
// Print a host-resident N x N matrix to stdout, one bracketed row per line,
// followed by a blank line. The format assumes exactly five columns.
void printResults(float* m) {
    for (size_t r = 0; r < N; r++) {
        const float* row = m + r*N;
        fprintf(stdout, "[%f, %f, %f, %f, %f] \n", row[0], row[1], row[2], row[3], row[4]);
    }
    fprintf(stdout, "\n");
}
// Copy a device-resident N x N matrix into a temporary host buffer,
// print it via printResults(), and free the scratch buffer.
void printResultsGPU(float* md) {
    const size_t bytes = N*N*sizeof(float);
    float* host_copy = (float*) malloc(bytes);
    hipMemcpy(host_copy, md, bytes, hipMemcpyDeviceToHost);
    printResults(host_copy);
    free(host_copy);
}
// Element-wise m3 = m1 + m2, one thread per element.
// Expects a single square block: threadIdx.x is the row, threadIdx.y the
// column, and blockDim.x doubles as the row stride (valid because the
// launch uses an (N, N) block).
__global__ void mat_add(float* m1, float* m2, float* m3) {
    int idx = threadIdx.x * blockDim.x + threadIdx.y;
    m3[idx] = m1[idx] + m2[idx];
}
// Element-wise m3 = m1 - m2, one thread per element.
// Same indexing scheme as mat_add: square (N, N) block, threadIdx.x = row,
// threadIdx.y = column, blockDim.x used as the row stride.
__global__ void mat_sub(float* m1, float* m2, float* m3) {
    int idx = threadIdx.x * blockDim.x + threadIdx.y;
    m3[idx] = m1[idx] - m2[idx];
}
// m3 = m1 * m2 for N x N row-major matrices, one output element per thread.
// Expects a single square (N, N) block: threadIdx.x selects the output row,
// threadIdx.y the output column.
__global__ void mat_mult(float* m1, float* m2, float* m3) {
    int row = threadIdx.x;
    int col = threadIdx.y;
    // Flat output index; blockDim.x serves as the row stride (square block).
    int idx = (row * blockDim.x) + col;
    float acc = 0.0f;
    // Dot product of row `row` of m1 with column `col` of m2.
    for (size_t k = 0; k < N; k++) {
        acc += m1[row*N + k] * m2[k*N + col];
    }
    m3[idx] = acc;
}
void mat_add_cpu(float* m1, float* m2, float* m3) {
for (size_t i = 0; i < N; i++) {
for (size_t j = 0; j < N; j++) {
m3[N*i+j] = m1[N*i+j] + m2[N*i+j];
}
}
}
void mat_sub_cpu(float* m1, float* m2, float* m3) {
for (size_t i = 0; i < N; i++) {
for (size_t j = 0; j < N; j++) {
m3[N*i+j] = m1[N*i+j] - m2[N*i+j];
}
}
}
void mat_mult_cpu(float* m1, float* m2, float* m3) {
for (size_t i = 0; i < N; i++) {
for (size_t j = 0; j < N; j++) {
float sum = 0;
for (size_t k = 0; k < N; k++) {
float a = m1[N*i + k];
float b = m2[k*N + j];
sum += a * b;
}
m3[N*i+j] = sum;
}
}
}
// Driver: runs add/sub/mult on the GPU and then on the CPU, timing each
// section with HIP events and printing the resulting 5x5 matrix after each.
int main(int argc, char** argv) {
    // Allocate and initialize all host/device matrices.
    init();
    // One square thread block, one thread per matrix element.
    dim3 dimBlock(N,N);
    // Events used to time every GPU and CPU section below.
    hipEvent_t beginEvent;
    hipEvent_t endEvent;
    hipEventCreate( &beginEvent );
    hipEventCreate( &endEvent );
    float timeValue;  // elapsed milliseconds, reused for each measurement
    //fprintf(stdout, "GPU: \n");
    // --- GPU matrix addition: time the kernel and print the result.
    hipEventRecord(beginEvent, 0);
    mat_add<<<1,dimBlock>>>(mat_1d, mat_2d, mat_3d);
    hipDeviceSynchronize();
    hipEventRecord(endEvent, 0);
    hipEventSynchronize(endEvent);
    hipEventElapsedTime(&timeValue, beginEvent, endEvent);
    fprintf(stdout, "GPU mat_add kernel time: %f.\n", timeValue);
    printResultsGPU(mat_3d);
    // --- GPU matrix subtraction (mat_3d is overwritten each section).
    hipEventRecord(beginEvent, 0);
    mat_sub<<<1,dimBlock>>>(mat_1d, mat_2d, mat_3d);
    hipDeviceSynchronize();
    hipEventRecord(endEvent, 0);
    hipEventSynchronize(endEvent);
    hipEventElapsedTime(&timeValue, beginEvent, endEvent);
    fprintf(stdout, "GPU mat_sub kernel time: %f.\n", timeValue);
    printResultsGPU(mat_3d);
    // --- GPU matrix multiplication.
    hipEventRecord(beginEvent, 0);
    mat_mult<<<1,dimBlock>>>(mat_1d, mat_2d, mat_3d);
    hipDeviceSynchronize();
    hipEventRecord(endEvent, 0);
    hipEventSynchronize(endEvent);
    hipEventElapsedTime(&timeValue, beginEvent, endEvent);
    fprintf(stdout, "GPU mat_mult kernel time: %f.\n", timeValue);
    printResultsGPU(mat_3d);
    //fprintf(stdout, "CPU: \n");
    // --- CPU sections below are timed with the same GPU events.
    // NOTE(review): this measures the wall time between the two record calls
    // only because the default stream is otherwise idle, so each record
    // executes as soon as it is enqueued — confirm on the target runtime.
    hipEventRecord(beginEvent, 0);
    mat_add_cpu(mat_1, mat_2, mat_3);
    hipEventRecord(endEvent, 0);
    hipEventSynchronize(endEvent);
    hipEventElapsedTime(&timeValue, beginEvent, endEvent);
    fprintf(stdout, "CPU mat_add kernel time: %f.\n", timeValue);
    printResults(mat_3);
    // --- CPU matrix subtraction.
    hipEventRecord(beginEvent, 0);
    mat_sub_cpu(mat_1, mat_2, mat_3);
    hipEventRecord(endEvent, 0);
    hipEventSynchronize(endEvent);
    hipEventElapsedTime(&timeValue, beginEvent, endEvent);
    fprintf(stdout, "CPU mat_sub kernel time: %f.\n", timeValue);
    printResults(mat_3);
    // --- CPU matrix multiplication.
    hipEventRecord(beginEvent, 0);
    mat_mult_cpu(mat_1, mat_2, mat_3);
    hipEventRecord(endEvent, 0);
    hipEventSynchronize(endEvent);
    hipEventElapsedTime(&timeValue, beginEvent, endEvent);
    fprintf(stdout, "CPU mat_mult kernel time: %f.\n", timeValue);
    printResults(mat_3);
    // Free all host/device buffers before parking the process.
    cleanUp();
    // Busy-wait forever; the process never exits normally — presumably to
    // keep output visible. TODO confirm this is intentional.
    while (true) {
    }
    return 0;
}
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z7mat_addPfS_S_
.globl _Z7mat_addPfS_S_
.p2align 8
.type _Z7mat_addPfS_S_,@function
_Z7mat_addPfS_S_:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b128 s[4:7], s[0:1], 0x0
v_and_b32_e32 v1, 0x3ff, v0
v_bfe_u32 v0, v0, 10, 10
s_load_b64 s[0:1], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_u32_u24_e32 v1, s2, v1
v_add_lshl_u32 v0, v1, v0, 2
s_clause 0x1
global_load_b32 v1, v0, s[4:5]
global_load_b32 v2, v0, s[6:7]
s_waitcnt vmcnt(0)
v_add_f32_e32 v1, v1, v2
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z7mat_addPfS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 8
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z7mat_addPfS_S_, .Lfunc_end0-_Z7mat_addPfS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z7mat_subPfS_S_
.globl _Z7mat_subPfS_S_
.p2align 8
.type _Z7mat_subPfS_S_,@function
_Z7mat_subPfS_S_:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b128 s[4:7], s[0:1], 0x0
v_and_b32_e32 v1, 0x3ff, v0
v_bfe_u32 v0, v0, 10, 10
s_load_b64 s[0:1], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_u32_u24_e32 v1, s2, v1
v_add_lshl_u32 v0, v1, v0, 2
s_clause 0x1
global_load_b32 v1, v0, s[4:5]
global_load_b32 v2, v0, s[6:7]
s_waitcnt vmcnt(0)
v_sub_f32_e32 v1, v1, v2
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z7mat_subPfS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 8
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z7mat_subPfS_S_, .Lfunc_end1-_Z7mat_subPfS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z8mat_multPfS_S_
.globl _Z8mat_multPfS_S_
.p2align 8
.type _Z8mat_multPfS_S_,@function
_Z8mat_multPfS_S_:
s_clause 0x1
s_load_b128 s[8:11], s[0:1], 0x0
s_load_b32 s4, s[0:1], 0x24
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
v_mov_b32_e32 v6, 0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_u32_u24_e32 v1, 5, v2
v_lshlrev_b32_e32 v0, 2, v1
s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_lshlrev_b32_e32 v1, 2, v3
s_waitcnt lgkmcnt(0)
v_add_co_u32 v4, s2, s8, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_ci_u32_e64 v5, null, s9, 0, s2
v_add_co_u32 v0, s2, s10, v1
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v1, null, s11, 0, s2
s_mov_b64 s[2:3], 0
.LBB2_1:
s_delay_alu instid0(SALU_CYCLE_1)
v_add_co_u32 v7, vcc_lo, v4, s2
v_add_co_ci_u32_e32 v8, vcc_lo, s3, v5, vcc_lo
s_add_u32 s2, s2, 4
s_addc_u32 s3, s3, 0
global_load_b32 v9, v[0:1], off
global_load_b32 v7, v[7:8], off
v_add_co_u32 v0, vcc_lo, v0, 20
v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
s_cmp_eq_u64 s[2:3], 20
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v6, v7, v9
s_cbranch_scc0 .LBB2_1
s_load_b64 s[0:1], s[0:1], 0x10
s_and_b32 s2, 0xffff, s4
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_u32_u24_e32 v0, s2, v2
v_add_lshl_u32 v0, v0, v3, 2
s_waitcnt lgkmcnt(0)
global_store_b32 v0, v6, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z8mat_multPfS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 10
.amdhsa_next_free_sgpr 12
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z8mat_multPfS_S_, .Lfunc_end2-_Z8mat_multPfS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z7mat_addPfS_S_
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z7mat_addPfS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z7mat_subPfS_S_
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z7mat_subPfS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z8mat_multPfS_S_
.private_segment_fixed_size: 0
.sgpr_count: 14
.sgpr_spill_count: 0
.symbol: _Z8mat_multPfS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 10
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
float * mat_1d;
float * mat_2d;
float * mat_3d;
float * mat_1;
float * mat_2;
float * mat_3;
const int N = 5;
// initialize function
void init() {
int size = N*N;
// Allocate CPU Memory
mat_1 = (float*) malloc(size*sizeof(float));
mat_2 = (float*) malloc(size*sizeof(float));
mat_3 = (float*) malloc(size*sizeof(float));
// Allocate GPU Memory
hipMalloc((void**)&mat_1d, size*sizeof(float));
hipMalloc((void**)&mat_2d, size*sizeof(float));
hipMalloc((void**)&mat_3d, size*sizeof(float));
// Initialize CPU Memory
for (size_t i = 0; i < size; i++) {
mat_1[i] = 3.2*(i/5) - 1.2*(i%5) + 7.5;
mat_2[i] = 1.6*(i/5) + 5.5*(i%5) - 2.2;
mat_3[i] = 0;
}
// Initialize GPU Memory
hipMemcpy(mat_1d, mat_1, size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(mat_2d, mat_2, size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(mat_3d, mat_3, size*sizeof(float), hipMemcpyHostToDevice);
}
// Release all host and device matrix buffers.
// Fix: the global pointers were left dangling after free/hipFree; they are
// now reset to NULL so a repeated cleanUp() (or a stray later use) is a
// detectable no-op instead of undefined behavior.
void cleanUp() {
    free(mat_1);  mat_1 = NULL;
    free(mat_2);  mat_2 = NULL;
    free(mat_3);  mat_3 = NULL;
    hipFree(mat_1d);  mat_1d = NULL;
    hipFree(mat_2d);  mat_2d = NULL;
    hipFree(mat_3d);  mat_3d = NULL;
}
// Print an N x N row-major matrix to stdout, one bracketed row per line,
// followed by a trailing blank line.
void printResults(float* m) {
    for (size_t row = 0; row < N; row++) {
        const float* r = m + row * N;  // start of this row
        fprintf(stdout, "[%f, %f, %f, %f, %f] \n", r[0], r[1], r[2], r[3], r[4]);
    }
    fprintf(stdout, "\n");
}
void printResultsGPU(float* md) {
float* temp = (float*) malloc(N*N*sizeof(float));
hipMemcpy(temp, md, N*N*sizeof(float), hipMemcpyDeviceToHost);
printResults(temp);
free(temp);
}
// Element-wise matrix addition: m3 = m1 + m2.
// Expected launch: a single dim3(N, N) block; threadIdx.x/.y address one
// element each, flattened with stride blockDim.x (the block is square, so
// this matches the row-major layout).  No bounds guard is present, so the
// launch must cover the matrix exactly.
__global__ void mat_add(float* m1, float* m2, float* m3) {
    int flat = (threadIdx.x * blockDim.x) + threadIdx.y;
    m3[flat] = m1[flat] + m2[flat];
}
// Element-wise matrix subtraction: m3 = m1 - m2.
// Same launch contract as mat_add: one dim3(N, N) block, one element per
// thread, index flattened with stride blockDim.x, no bounds guard.
__global__ void mat_sub(float* m1, float* m2, float* m3) {
    int flat = (threadIdx.x * blockDim.x) + threadIdx.y;
    m3[flat] = m1[flat] - m2[flat];
}
// Naive matrix multiplication: m3 = m1 * m2 for N x N row-major matrices.
// Expected launch: a single dim3(N, N) block where threadIdx.x is the row
// and threadIdx.y the column of the output element computed by this thread.
// Each thread walks one row of m1 against one column of m2 from global
// memory (no shared-memory tiling).
__global__ void mat_mult(float* m1, float* m2, float* m3) {
    int row = threadIdx.x;
    int col = threadIdx.y;
    // Accumulate the dot product for element (row, col).
    float acc = 0.0f;
    for (size_t k = 0; k < N; k++) {
        acc += m1[row * N + k] * m2[k * N + col];
    }
    m3[(threadIdx.x * blockDim.x) + threadIdx.y] = acc;
}
void mat_add_cpu(float* m1, float* m2, float* m3) {
for (size_t i = 0; i < N; i++) {
for (size_t j = 0; j < N; j++) {
m3[N*i+j] = m1[N*i+j] + m2[N*i+j];
}
}
}
void mat_sub_cpu(float* m1, float* m2, float* m3) {
for (size_t i = 0; i < N; i++) {
for (size_t j = 0; j < N; j++) {
m3[N*i+j] = m1[N*i+j] - m2[N*i+j];
}
}
}
void mat_mult_cpu(float* m1, float* m2, float* m3) {
for (size_t i = 0; i < N; i++) {
for (size_t j = 0; j < N; j++) {
float sum = 0;
for (size_t k = 0; k < N; k++) {
float a = m1[N*i + k];
float b = m2[k*N + j];
sum += a * b;
}
m3[N*i+j] = sum;
}
}
}
int main(int argc, char** argv) {
init();
dim3 dimBlock(N,N);
// Add events for profiling
hipEvent_t beginEvent;
hipEvent_t endEvent;
hipEventCreate( &beginEvent );
hipEventCreate( &endEvent );
float timeValue;
//fprintf(stdout, "GPU: \n");
// Do matrix addition on the GPU and see the result
hipEventRecord(beginEvent, 0);
mat_add<<<1,dimBlock>>>(mat_1d, mat_2d, mat_3d);
hipDeviceSynchronize();
hipEventRecord(endEvent, 0);
hipEventSynchronize(endEvent);
hipEventElapsedTime(&timeValue, beginEvent, endEvent);
fprintf(stdout, "GPU mat_add kernel time: %f.\n", timeValue);
printResultsGPU(mat_3d);
// Do matrix subtraction on the GPU and see the result
hipEventRecord(beginEvent, 0);
mat_sub<<<1,dimBlock>>>(mat_1d, mat_2d, mat_3d);
hipDeviceSynchronize();
hipEventRecord(endEvent, 0);
hipEventSynchronize(endEvent);
hipEventElapsedTime(&timeValue, beginEvent, endEvent);
fprintf(stdout, "GPU mat_sub kernel time: %f.\n", timeValue);
printResultsGPU(mat_3d);
// Do matrix multiplication on the GPU and see the result
hipEventRecord(beginEvent, 0);
mat_mult<<<1,dimBlock>>>(mat_1d, mat_2d, mat_3d);
hipDeviceSynchronize();
hipEventRecord(endEvent, 0);
hipEventSynchronize(endEvent);
hipEventElapsedTime(&timeValue, beginEvent, endEvent);
fprintf(stdout, "GPU mat_mult kernel time: %f.\n", timeValue);
printResultsGPU(mat_3d);
//fprintf(stdout, "CPU: \n");
// Do matrix addition on the CPU and see the result
hipEventRecord(beginEvent, 0);
mat_add_cpu(mat_1, mat_2, mat_3);
hipEventRecord(endEvent, 0);
hipEventSynchronize(endEvent);
hipEventElapsedTime(&timeValue, beginEvent, endEvent);
fprintf(stdout, "CPU mat_add kernel time: %f.\n", timeValue);
printResults(mat_3);
// Do matrix subtraction on the CPU and see the result
hipEventRecord(beginEvent, 0);
mat_sub_cpu(mat_1, mat_2, mat_3);
hipEventRecord(endEvent, 0);
hipEventSynchronize(endEvent);
hipEventElapsedTime(&timeValue, beginEvent, endEvent);
fprintf(stdout, "CPU mat_sub kernel time: %f.\n", timeValue);
printResults(mat_3);
// Do matrix multiplication on the CPU and see the result
hipEventRecord(beginEvent, 0);
mat_mult_cpu(mat_1, mat_2, mat_3);
hipEventRecord(endEvent, 0);
hipEventSynchronize(endEvent);
hipEventElapsedTime(&timeValue, beginEvent, endEvent);
fprintf(stdout, "CPU mat_mult kernel time: %f.\n", timeValue);
printResults(mat_3);
cleanUp();
while (true) {
}
return 0;
} | .text
.file "matrix_math.hip"
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z4initv
.LCPI0_0:
.quad 0x400999999999999a # double 3.2000000000000002
.LCPI0_3:
.quad 0xbff3333333333333 # double -1.2
.LCPI0_4:
.quad 0x401e000000000000 # double 7.5
.LCPI0_5:
.quad 0x3ff999999999999a # double 1.6000000000000001
.LCPI0_6:
.quad 0x4016000000000000 # double 5.5
.LCPI0_7:
.quad 0xc00199999999999a # double -2.2000000000000002
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0
.LCPI0_1:
.long 1127219200 # 0x43300000
.long 1160773632 # 0x45300000
.long 0 # 0x0
.long 0 # 0x0
.LCPI0_2:
.quad 0x4330000000000000 # double 4503599627370496
.quad 0x4530000000000000 # double 1.9342813113834067E+25
.text
.globl _Z4initv
.p2align 4, 0x90
.type _Z4initv,@function
_Z4initv: # @_Z4initv
.cfi_startproc
# %bb.0:
pushq %rax
.cfi_def_cfa_offset 16
movl $100, %edi
callq malloc
movq %rax, mat_1(%rip)
movl $100, %edi
callq malloc
movq %rax, mat_2(%rip)
movl $100, %edi
callq malloc
movq %rax, mat_3(%rip)
movl $mat_1d, %edi
movl $100, %esi
callq hipMalloc
movl $mat_2d, %edi
movl $100, %esi
callq hipMalloc
movl $mat_3d, %edi
movl $100, %esi
callq hipMalloc
movq mat_1(%rip), %rsi
movq mat_2(%rip), %rdi
xorl %ecx, %ecx
movq mat_3(%rip), %r8
movabsq $-3689348814741910323, %r9 # imm = 0xCCCCCCCCCCCCCCCD
movsd .LCPI0_0(%rip), %xmm0 # xmm0 = mem[0],zero
movdqa .LCPI0_1(%rip), %xmm1 # xmm1 = [1127219200,1160773632,0,0]
movapd .LCPI0_2(%rip), %xmm2 # xmm2 = [4.503599627370496E+15,1.9342813113834067E+25]
movsd .LCPI0_3(%rip), %xmm3 # xmm3 = mem[0],zero
movsd .LCPI0_4(%rip), %xmm4 # xmm4 = mem[0],zero
movsd .LCPI0_5(%rip), %xmm5 # xmm5 = mem[0],zero
movsd .LCPI0_6(%rip), %xmm6 # xmm6 = mem[0],zero
movsd .LCPI0_7(%rip), %xmm7 # xmm7 = mem[0],zero
.p2align 4, 0x90
.LBB0_1: # =>This Inner Loop Header: Depth=1
movq %rcx, %rax
mulq %r9
shrq $2, %rdx
leaq (%rdx,%rdx,4), %rax
xorps %xmm8, %xmm8
cvtsi2sd %rdx, %xmm8
movapd %xmm8, %xmm9
mulsd %xmm0, %xmm9
movq %rcx, %rdx
subq %rax, %rdx
movq %rdx, %xmm10
punpckldq %xmm1, %xmm10 # xmm10 = xmm10[0],xmm1[0],xmm10[1],xmm1[1]
subpd %xmm2, %xmm10
movapd %xmm10, %xmm11
unpckhpd %xmm10, %xmm11 # xmm11 = xmm11[1],xmm10[1]
addsd %xmm10, %xmm11
movapd %xmm11, %xmm10
mulsd %xmm3, %xmm10
addsd %xmm9, %xmm10
addsd %xmm4, %xmm10
xorps %xmm9, %xmm9
cvtsd2ss %xmm10, %xmm9
movss %xmm9, (%rsi,%rcx,4)
mulsd %xmm5, %xmm8
mulsd %xmm6, %xmm11
addsd %xmm8, %xmm11
addsd %xmm7, %xmm11
xorps %xmm8, %xmm8
cvtsd2ss %xmm11, %xmm8
movss %xmm8, (%rdi,%rcx,4)
movl $0, (%r8,%rcx,4)
incq %rcx
cmpq $25, %rcx
jne .LBB0_1
# %bb.2:
movq mat_1d(%rip), %rdi
movq mat_1(%rip), %rsi
movl $100, %edx
movl $1, %ecx
callq hipMemcpy
movq mat_2d(%rip), %rdi
movq mat_2(%rip), %rsi
movl $100, %edx
movl $1, %ecx
callq hipMemcpy
movq mat_3d(%rip), %rdi
movq mat_3(%rip), %rsi
movl $100, %edx
movl $1, %ecx
popq %rax
.cfi_def_cfa_offset 8
jmp hipMemcpy # TAILCALL
.Lfunc_end0:
.size _Z4initv, .Lfunc_end0-_Z4initv
.cfi_endproc
# -- End function
.globl _Z7cleanUpv # -- Begin function _Z7cleanUpv
.p2align 4, 0x90
.type _Z7cleanUpv,@function
_Z7cleanUpv: # @_Z7cleanUpv
.cfi_startproc
# %bb.0:
pushq %rax
.cfi_def_cfa_offset 16
movq mat_1(%rip), %rdi
callq free
movq mat_2(%rip), %rdi
callq free
movq mat_3(%rip), %rdi
callq free
movq mat_1d(%rip), %rdi
callq hipFree
movq mat_2d(%rip), %rdi
callq hipFree
movq mat_3d(%rip), %rdi
popq %rax
.cfi_def_cfa_offset 8
jmp hipFree # TAILCALL
.Lfunc_end1:
.size _Z7cleanUpv, .Lfunc_end1-_Z7cleanUpv
.cfi_endproc
# -- End function
.globl _Z12printResultsPf # -- Begin function _Z12printResultsPf
.p2align 4, 0x90
.type _Z12printResultsPf,@function
_Z12printResultsPf: # @_Z12printResultsPf
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
pushq %rax
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movq %rdi, %rbx
movl $16, %r14d
.p2align 4, 0x90
.LBB2_1: # =>This Inner Loop Header: Depth=1
movq stdout(%rip), %rdi
movss -16(%rbx,%r14), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss -12(%rbx,%r14), %xmm1 # xmm1 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
cvtss2sd %xmm1, %xmm1
movss -8(%rbx,%r14), %xmm2 # xmm2 = mem[0],zero,zero,zero
cvtss2sd %xmm2, %xmm2
movss -4(%rbx,%r14), %xmm3 # xmm3 = mem[0],zero,zero,zero
cvtss2sd %xmm3, %xmm3
movss (%rbx,%r14), %xmm4 # xmm4 = mem[0],zero,zero,zero
cvtss2sd %xmm4, %xmm4
movl $.L.str, %esi
movb $5, %al
callq fprintf
addq $20, %r14
cmpq $116, %r14
jne .LBB2_1
# %bb.2:
movq stdout(%rip), %rsi
movl $10, %edi
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
jmp fputc@PLT # TAILCALL
.Lfunc_end2:
.size _Z12printResultsPf, .Lfunc_end2-_Z12printResultsPf
.cfi_endproc
# -- End function
.globl _Z15printResultsGPUPf # -- Begin function _Z15printResultsGPUPf
.p2align 4, 0x90
.type _Z15printResultsGPUPf,@function
_Z15printResultsGPUPf: # @_Z15printResultsGPUPf
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
pushq %rax
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movq %rdi, %r14
movl $100, %edi
callq malloc
movq %rax, %rbx
movl $100, %edx
movq %rax, %rdi
movq %r14, %rsi
movl $2, %ecx
callq hipMemcpy
movl $16, %r14d
.p2align 4, 0x90
.LBB3_1: # =>This Inner Loop Header: Depth=1
movq stdout(%rip), %rdi
movss -16(%rbx,%r14), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss -12(%rbx,%r14), %xmm1 # xmm1 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
cvtss2sd %xmm1, %xmm1
movss -8(%rbx,%r14), %xmm2 # xmm2 = mem[0],zero,zero,zero
cvtss2sd %xmm2, %xmm2
movss -4(%rbx,%r14), %xmm3 # xmm3 = mem[0],zero,zero,zero
cvtss2sd %xmm3, %xmm3
movss (%rbx,%r14), %xmm4 # xmm4 = mem[0],zero,zero,zero
cvtss2sd %xmm4, %xmm4
movl $.L.str, %esi
movb $5, %al
callq fprintf
addq $20, %r14
cmpq $116, %r14
jne .LBB3_1
# %bb.2: # %_Z12printResultsPf.exit
movq stdout(%rip), %rsi
movl $10, %edi
callq fputc@PLT
movq %rbx, %rdi
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
jmp free # TAILCALL
.Lfunc_end3:
.size _Z15printResultsGPUPf, .Lfunc_end3-_Z15printResultsGPUPf
.cfi_endproc
# -- End function
.globl _Z22__device_stub__mat_addPfS_S_ # -- Begin function _Z22__device_stub__mat_addPfS_S_
.p2align 4, 0x90
.type _Z22__device_stub__mat_addPfS_S_,@function
_Z22__device_stub__mat_addPfS_S_: # @_Z22__device_stub__mat_addPfS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z7mat_addPfS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end4:
.size _Z22__device_stub__mat_addPfS_S_, .Lfunc_end4-_Z22__device_stub__mat_addPfS_S_
.cfi_endproc
# -- End function
.globl _Z22__device_stub__mat_subPfS_S_ # -- Begin function _Z22__device_stub__mat_subPfS_S_
.p2align 4, 0x90
.type _Z22__device_stub__mat_subPfS_S_,@function
_Z22__device_stub__mat_subPfS_S_: # @_Z22__device_stub__mat_subPfS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z7mat_subPfS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end5:
.size _Z22__device_stub__mat_subPfS_S_, .Lfunc_end5-_Z22__device_stub__mat_subPfS_S_
.cfi_endproc
# -- End function
.globl _Z23__device_stub__mat_multPfS_S_ # -- Begin function _Z23__device_stub__mat_multPfS_S_
.p2align 4, 0x90
.type _Z23__device_stub__mat_multPfS_S_,@function
_Z23__device_stub__mat_multPfS_S_: # @_Z23__device_stub__mat_multPfS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z8mat_multPfS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end6:
.size _Z23__device_stub__mat_multPfS_S_, .Lfunc_end6-_Z23__device_stub__mat_multPfS_S_
.cfi_endproc
# -- End function
.globl _Z11mat_add_cpuPfS_S_ # -- Begin function _Z11mat_add_cpuPfS_S_
.p2align 4, 0x90
.type _Z11mat_add_cpuPfS_S_,@function
_Z11mat_add_cpuPfS_S_: # @_Z11mat_add_cpuPfS_S_
.cfi_startproc
# %bb.0:
xorl %eax, %eax
.p2align 4, 0x90
.LBB7_1: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB7_2 Depth 2
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB7_2: # Parent Loop BB7_1 Depth=1
# => This Inner Loop Header: Depth=2
movss (%rdi,%rcx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
addss (%rsi,%rcx,4), %xmm0
movss %xmm0, (%rdx,%rcx,4)
incq %rcx
cmpq $5, %rcx
jne .LBB7_2
# %bb.3: # in Loop: Header=BB7_1 Depth=1
incq %rax
addq $20, %rdx
addq $20, %rsi
addq $20, %rdi
cmpq $5, %rax
jne .LBB7_1
# %bb.4:
retq
.Lfunc_end7:
.size _Z11mat_add_cpuPfS_S_, .Lfunc_end7-_Z11mat_add_cpuPfS_S_
.cfi_endproc
# -- End function
.globl _Z11mat_sub_cpuPfS_S_ # -- Begin function _Z11mat_sub_cpuPfS_S_
.p2align 4, 0x90
.type _Z11mat_sub_cpuPfS_S_,@function
_Z11mat_sub_cpuPfS_S_: # @_Z11mat_sub_cpuPfS_S_
.cfi_startproc
# %bb.0:
xorl %eax, %eax
.p2align 4, 0x90
.LBB8_1: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB8_2 Depth 2
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB8_2: # Parent Loop BB8_1 Depth=1
# => This Inner Loop Header: Depth=2
movss (%rdi,%rcx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
subss (%rsi,%rcx,4), %xmm0
movss %xmm0, (%rdx,%rcx,4)
incq %rcx
cmpq $5, %rcx
jne .LBB8_2
# %bb.3: # in Loop: Header=BB8_1 Depth=1
incq %rax
addq $20, %rdx
addq $20, %rsi
addq $20, %rdi
cmpq $5, %rax
jne .LBB8_1
# %bb.4:
retq
.Lfunc_end8:
.size _Z11mat_sub_cpuPfS_S_, .Lfunc_end8-_Z11mat_sub_cpuPfS_S_
.cfi_endproc
# -- End function
.globl _Z12mat_mult_cpuPfS_S_ # -- Begin function _Z12mat_mult_cpuPfS_S_
.p2align 4, 0x90
.type _Z12mat_mult_cpuPfS_S_,@function
_Z12mat_mult_cpuPfS_S_: # @_Z12mat_mult_cpuPfS_S_
.cfi_startproc
# %bb.0:
xorl %eax, %eax
.p2align 4, 0x90
.LBB9_1: # %.preheader21
# =>This Loop Header: Depth=1
# Child Loop BB9_2 Depth 2
# Child Loop BB9_3 Depth 3
leaq (%rax,%rax,4), %rcx
leaq (%rdx,%rcx,4), %rcx
movq %rsi, %r8
xorl %r9d, %r9d
.p2align 4, 0x90
.LBB9_2: # %.preheader
# Parent Loop BB9_1 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB9_3 Depth 3
xorps %xmm0, %xmm0
movq %r8, %r10
xorl %r11d, %r11d
.p2align 4, 0x90
.LBB9_3: # Parent Loop BB9_1 Depth=1
# Parent Loop BB9_2 Depth=2
# => This Inner Loop Header: Depth=3
movss (%rdi,%r11,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
mulss (%r10), %xmm1
addss %xmm1, %xmm0
incq %r11
addq $20, %r10
cmpq $5, %r11
jne .LBB9_3
# %bb.4: # in Loop: Header=BB9_2 Depth=2
movss %xmm0, (%rcx,%r9,4)
incq %r9
addq $4, %r8
cmpq $5, %r9
jne .LBB9_2
# %bb.5: # in Loop: Header=BB9_1 Depth=1
incq %rax
addq $20, %rdi
cmpq $5, %rax
jne .LBB9_1
# %bb.6:
retq
.Lfunc_end9:
.size _Z12mat_mult_cpuPfS_S_, .Lfunc_end9-_Z12mat_mult_cpuPfS_S_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $120, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movabsq $21474836485, %rbx # imm = 0x500000005
movabsq $4294967297, %r14 # imm = 0x100000001
callq _Z4initv
leaq 16(%rsp), %rdi
callq hipEventCreate
leaq 8(%rsp), %rdi
callq hipEventCreate
movq 16(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq %r14, %rdi
movl $1, %esi
movq %rbx, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB10_2
# %bb.1:
movq mat_1d(%rip), %rax
movq mat_2d(%rip), %rcx
movq mat_3d(%rip), %rdx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movq %rdx, 72(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z7mat_addPfS_S_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB10_2:
callq hipDeviceSynchronize
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 8(%rsp), %rdi
callq hipEventSynchronize
movq 16(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 4(%rsp), %rdi
callq hipEventElapsedTime
movq stdout(%rip), %rdi
movss 4(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.2, %esi
movb $1, %al
callq fprintf
movq mat_3d(%rip), %rdi
callq _Z15printResultsGPUPf
movq 16(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq %r14, %rdi
movl $1, %esi
movq %rbx, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB10_4
# %bb.3:
movq mat_1d(%rip), %rax
movq mat_2d(%rip), %rcx
movq mat_3d(%rip), %rdx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movq %rdx, 72(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z7mat_subPfS_S_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB10_4:
callq hipDeviceSynchronize
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 8(%rsp), %rdi
callq hipEventSynchronize
movq 16(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 4(%rsp), %rdi
callq hipEventElapsedTime
movq stdout(%rip), %rdi
movss 4(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.3, %esi
movb $1, %al
callq fprintf
movq mat_3d(%rip), %rdi
callq _Z15printResultsGPUPf
movq 16(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq %r14, %rdi
movl $1, %esi
movq %rbx, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB10_6
# %bb.5:
movq mat_1d(%rip), %rax
movq mat_2d(%rip), %rcx
movq mat_3d(%rip), %rdx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movq %rdx, 72(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z8mat_multPfS_S_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB10_6:
callq hipDeviceSynchronize
movq 8(%rsp), %rdi
xorl %ebx, %ebx
xorl %esi, %esi
callq hipEventRecord
movq 8(%rsp), %rdi
callq hipEventSynchronize
movq 16(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 4(%rsp), %rdi
callq hipEventElapsedTime
movq stdout(%rip), %rdi
movss 4(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.4, %esi
movb $1, %al
callq fprintf
movq mat_3d(%rip), %rdi
callq _Z15printResultsGPUPf
movq 16(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq mat_1(%rip), %rax
movq mat_2(%rip), %rcx
movq mat_3(%rip), %rdx
.p2align 4, 0x90
.LBB10_7: # %.preheader.i
# =>This Loop Header: Depth=1
# Child Loop BB10_8 Depth 2
xorl %esi, %esi
.p2align 4, 0x90
.LBB10_8: # Parent Loop BB10_7 Depth=1
# => This Inner Loop Header: Depth=2
movss (%rax,%rsi,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
addss (%rcx,%rsi,4), %xmm0
movss %xmm0, (%rdx,%rsi,4)
incq %rsi
cmpq $5, %rsi
jne .LBB10_8
# %bb.9: # in Loop: Header=BB10_7 Depth=1
incq %rbx
addq $20, %rdx
addq $20, %rcx
addq $20, %rax
cmpq $5, %rbx
jne .LBB10_7
# %bb.10: # %_Z11mat_add_cpuPfS_S_.exit
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 8(%rsp), %rdi
callq hipEventSynchronize
movq 16(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 4(%rsp), %rdi
callq hipEventElapsedTime
movq stdout(%rip), %rdi
movss 4(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.5, %esi
movb $1, %al
callq fprintf
movl $16, %ebx
movq mat_3(%rip), %r14
.p2align 4, 0x90
.LBB10_11: # =>This Inner Loop Header: Depth=1
movq stdout(%rip), %rdi
movss -16(%r14,%rbx), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss -12(%r14,%rbx), %xmm1 # xmm1 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
cvtss2sd %xmm1, %xmm1
movss -8(%r14,%rbx), %xmm2 # xmm2 = mem[0],zero,zero,zero
cvtss2sd %xmm2, %xmm2
movss -4(%r14,%rbx), %xmm3 # xmm3 = mem[0],zero,zero,zero
cvtss2sd %xmm3, %xmm3
movss (%r14,%rbx), %xmm4 # xmm4 = mem[0],zero,zero,zero
cvtss2sd %xmm4, %xmm4
movl $.L.str, %esi
movb $5, %al
callq fprintf
addq $20, %rbx
cmpq $116, %rbx
jne .LBB10_11
# %bb.12: # %_Z12printResultsPf.exit
movq stdout(%rip), %rsi
movl $10, %edi
callq fputc@PLT
movq 16(%rsp), %rdi
xorl %ebx, %ebx
xorl %esi, %esi
callq hipEventRecord
movq mat_1(%rip), %rax
movq mat_2(%rip), %rcx
movq mat_3(%rip), %rdx
.p2align 4, 0x90
.LBB10_13: # %.preheader.i32
# =>This Loop Header: Depth=1
# Child Loop BB10_14 Depth 2
xorl %esi, %esi
.p2align 4, 0x90
.LBB10_14: # Parent Loop BB10_13 Depth=1
# => This Inner Loop Header: Depth=2
movss (%rax,%rsi,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
subss (%rcx,%rsi,4), %xmm0
movss %xmm0, (%rdx,%rsi,4)
incq %rsi
cmpq $5, %rsi
jne .LBB10_14
# %bb.15: # in Loop: Header=BB10_13 Depth=1
incq %rbx
addq $20, %rdx
addq $20, %rcx
addq $20, %rax
cmpq $5, %rbx
jne .LBB10_13
# %bb.16: # %_Z11mat_sub_cpuPfS_S_.exit
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 8(%rsp), %rdi
callq hipEventSynchronize
movq 16(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 4(%rsp), %rdi
callq hipEventElapsedTime
movq stdout(%rip), %rdi
movss 4(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.6, %esi
movb $1, %al
callq fprintf
movl $16, %ebx
movq mat_3(%rip), %r14
.p2align 4, 0x90
.LBB10_17: # =>This Inner Loop Header: Depth=1
movq stdout(%rip), %rdi
movss -16(%r14,%rbx), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss -12(%r14,%rbx), %xmm1 # xmm1 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
cvtss2sd %xmm1, %xmm1
movss -8(%r14,%rbx), %xmm2 # xmm2 = mem[0],zero,zero,zero
cvtss2sd %xmm2, %xmm2
movss -4(%r14,%rbx), %xmm3 # xmm3 = mem[0],zero,zero,zero
cvtss2sd %xmm3, %xmm3
movss (%r14,%rbx), %xmm4 # xmm4 = mem[0],zero,zero,zero
cvtss2sd %xmm4, %xmm4
movl $.L.str, %esi
movb $5, %al
callq fprintf
addq $20, %rbx
cmpq $116, %rbx
jne .LBB10_17
# %bb.18: # %_Z12printResultsPf.exit40
movq stdout(%rip), %rsi
movl $10, %edi
callq fputc@PLT
movq 16(%rsp), %rdi
xorl %ebx, %ebx
xorl %esi, %esi
callq hipEventRecord
movq mat_1(%rip), %rax
movq mat_2(%rip), %rcx
movq mat_3(%rip), %rdx
.p2align 4, 0x90
.LBB10_19: # %.preheader21.i
# =>This Loop Header: Depth=1
# Child Loop BB10_20 Depth 2
# Child Loop BB10_21 Depth 3
leaq (%rbx,%rbx,4), %rsi
leaq (%rdx,%rsi,4), %rsi
movq %rcx, %rdi
xorl %r8d, %r8d
.p2align 4, 0x90
.LBB10_20: # %.preheader.i41
# Parent Loop BB10_19 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB10_21 Depth 3
xorps %xmm0, %xmm0
movq %rdi, %r9
xorl %r10d, %r10d
.p2align 4, 0x90
.LBB10_21: # Parent Loop BB10_19 Depth=1
# Parent Loop BB10_20 Depth=2
# => This Inner Loop Header: Depth=3
movss (%rax,%r10,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
mulss (%r9), %xmm1
addss %xmm1, %xmm0
incq %r10
addq $20, %r9
cmpq $5, %r10
jne .LBB10_21
# %bb.22: # in Loop: Header=BB10_20 Depth=2
movss %xmm0, (%rsi,%r8,4)
incq %r8
addq $4, %rdi
cmpq $5, %r8
jne .LBB10_20
# %bb.23: # in Loop: Header=BB10_19 Depth=1
incq %rbx
addq $20, %rax
cmpq $5, %rbx
jne .LBB10_19
# %bb.24: # %_Z12mat_mult_cpuPfS_S_.exit
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 8(%rsp), %rdi
callq hipEventSynchronize
movq 16(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 4(%rsp), %rdi
callq hipEventElapsedTime
movq stdout(%rip), %rdi
movss 4(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.7, %esi
movb $1, %al
callq fprintf
movl $16, %ebx
movq mat_3(%rip), %r14
.p2align 4, 0x90
.LBB10_25: # =>This Inner Loop Header: Depth=1
movq stdout(%rip), %rdi
movss -16(%r14,%rbx), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss -12(%r14,%rbx), %xmm1 # xmm1 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
cvtss2sd %xmm1, %xmm1
movss -8(%r14,%rbx), %xmm2 # xmm2 = mem[0],zero,zero,zero
cvtss2sd %xmm2, %xmm2
movss -4(%r14,%rbx), %xmm3 # xmm3 = mem[0],zero,zero,zero
cvtss2sd %xmm3, %xmm3
movss (%r14,%rbx), %xmm4 # xmm4 = mem[0],zero,zero,zero
cvtss2sd %xmm4, %xmm4
movl $.L.str, %esi
movb $5, %al
callq fprintf
addq $20, %rbx
cmpq $116, %rbx
jne .LBB10_25
# %bb.26: # %_Z12printResultsPf.exit46
movq stdout(%rip), %rsi
movl $10, %edi
callq fputc@PLT
movq mat_1(%rip), %rdi
callq free
movq mat_2(%rip), %rdi
callq free
movq mat_3(%rip), %rdi
callq free
movq mat_1d(%rip), %rdi
callq hipFree
movq mat_2d(%rip), %rdi
callq hipFree
movq mat_3d(%rip), %rdi
callq hipFree
.Lfunc_end10:
.size main, .Lfunc_end10-main
.cfi_endproc
# -- End function
# Module constructor, run at program startup via .init_array. Registers the
# embedded HIP fat binary exactly once (guarded by __hip_gpubin_handle) and
# then registers the three device kernels (mat_add, mat_sub, mat_mult) with
# the HIP runtime so their host-side stubs can be launched by symbol.
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp # 32-byte scratch: stack args 7-10 of __hipRegisterFunction
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
# Skip fat-binary registration if a handle was already cached.
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB11_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip) # cache handle for reuse and for the dtor
.LBB11_2:
movq __hip_gpubin_handle(%rip), %rbx # rbx = fat-binary handle (arg 1 below)
# Register _Z7mat_addPfS_S_: zero the four pointer-sized stack arguments
# (tid/bid/bDim/gDim out-params are unused -> NULL).
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z7mat_addPfS_S_, %esi # host stub address
movl $.L__unnamed_1, %edx # device function name string
movl $.L__unnamed_1, %ecx # deviceFun (same mangled name)
movq %rbx, %rdi
movl $-1, %r8d # thread_limit = -1 (no limit)
xorl %r9d, %r9d
callq __hipRegisterFunction
# Register _Z7mat_subPfS_S_ (same argument layout).
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z7mat_subPfS_S_, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
# Register _Z8mat_multPfS_S_ (same argument layout).
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8mat_multPfS_S_, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
# Arrange for the fat binary to be unregistered at exit.
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end11:
.size __hip_module_ctor, .Lfunc_end11-__hip_module_ctor
.cfi_endproc
# -- End function
# Module destructor, registered with atexit() by __hip_module_ctor.
# Unregisters the HIP fat binary if (and only if) it was registered,
# and clears the cached handle so the call is idempotent.
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi # nothing to do if never registered
je .LBB12_2
# %bb.1:
pushq %rax # realign stack to 16 bytes for the call
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip) # mark as unregistered
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB12_2:
retq
.Lfunc_end12:
.size __hip_module_dtor, .Lfunc_end12-__hip_module_dtor
.cfi_endproc
# -- End function
.type mat_1d,@object # @mat_1d
.bss
.globl mat_1d
.p2align 3, 0x0
mat_1d:
.quad 0
.size mat_1d, 8
.type mat_2d,@object # @mat_2d
.globl mat_2d
.p2align 3, 0x0
mat_2d:
.quad 0
.size mat_2d, 8
.type mat_3d,@object # @mat_3d
.globl mat_3d
.p2align 3, 0x0
mat_3d:
.quad 0
.size mat_3d, 8
.type mat_1,@object # @mat_1
.globl mat_1
.p2align 3, 0x0
mat_1:
.quad 0
.size mat_1, 8
.type mat_2,@object # @mat_2
.globl mat_2
.p2align 3, 0x0
mat_2:
.quad 0
.size mat_2, 8
.type mat_3,@object # @mat_3
.globl mat_3
.p2align 3, 0x0
mat_3:
.quad 0
.size mat_3, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "[%f, %f, %f, %f, %f] \n"
.size .L.str, 23
.type _Z7mat_addPfS_S_,@object # @_Z7mat_addPfS_S_
.section .rodata,"a",@progbits
.globl _Z7mat_addPfS_S_
.p2align 3, 0x0
_Z7mat_addPfS_S_:
.quad _Z22__device_stub__mat_addPfS_S_
.size _Z7mat_addPfS_S_, 8
.type _Z7mat_subPfS_S_,@object # @_Z7mat_subPfS_S_
.globl _Z7mat_subPfS_S_
.p2align 3, 0x0
_Z7mat_subPfS_S_:
.quad _Z22__device_stub__mat_subPfS_S_
.size _Z7mat_subPfS_S_, 8
.type _Z8mat_multPfS_S_,@object # @_Z8mat_multPfS_S_
.globl _Z8mat_multPfS_S_
.p2align 3, 0x0
_Z8mat_multPfS_S_:
.quad _Z23__device_stub__mat_multPfS_S_
.size _Z8mat_multPfS_S_, 8
.type .L.str.2,@object # @.str.2
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.2:
.asciz "GPU mat_add kernel time: %f.\n"
.size .L.str.2, 30
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "GPU mat_sub kernel time: %f.\n"
.size .L.str.3, 30
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "GPU mat_mult kernel time: %f.\n"
.size .L.str.4, 31
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "CPU mat_add kernel time: %f.\n"
.size .L.str.5, 30
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "CPU mat_sub kernel time: %f.\n"
.size .L.str.6, 30
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "CPU mat_mult kernel time: %f.\n"
.size .L.str.7, 31
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z7mat_addPfS_S_"
.size .L__unnamed_1, 17
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z7mat_subPfS_S_"
.size .L__unnamed_2, 17
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z8mat_multPfS_S_"
.size .L__unnamed_3, 18
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z22__device_stub__mat_addPfS_S_
.addrsig_sym _Z22__device_stub__mat_subPfS_S_
.addrsig_sym _Z23__device_stub__mat_multPfS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym mat_1d
.addrsig_sym mat_2d
.addrsig_sym mat_3d
.addrsig_sym _Z7mat_addPfS_S_
.addrsig_sym _Z7mat_subPfS_S_
.addrsig_sym _Z8mat_multPfS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z8mat_multPfS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R14, SR_TID.X ; /* 0x00000000000e7919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R19, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff137435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R17, SR_TID.Y ; /* 0x0000000000117919 */
/* 0x000e620000002200 */
/*0050*/ LEA R2, R14, R14, 0x2 ; /* 0x0000000e0e027211 */
/* 0x001fcc00078e10ff */
/*0060*/ IMAD.WIDE R4, R17, R19, c[0x0][0x168] ; /* 0x00005a0011047625 */
/* 0x002fc800078e0213 */
/*0070*/ IMAD.WIDE R2, R2, R19, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fe200078e0213 */
/*0080*/ LDG.E R7, [R4.64] ; /* 0x0000000404077981 */
/* 0x000ea8000c1e1900 */
/*0090*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea8000c1e1900 */
/*00a0*/ LDG.E R6, [R4.64+0x14] ; /* 0x0000140404067981 */
/* 0x000ee8000c1e1900 */
/*00b0*/ LDG.E R9, [R2.64+0x4] ; /* 0x0000040402097981 */
/* 0x000ee8000c1e1900 */
/*00c0*/ LDG.E R8, [R4.64+0x28] ; /* 0x0000280404087981 */
/* 0x000f28000c1e1900 */
/*00d0*/ LDG.E R11, [R2.64+0x8] ; /* 0x00000804020b7981 */
/* 0x000f28000c1e1900 */
/*00e0*/ LDG.E R10, [R4.64+0x3c] ; /* 0x00003c04040a7981 */
/* 0x000f68000c1e1900 */
/*00f0*/ LDG.E R13, [R2.64+0xc] ; /* 0x00000c04020d7981 */
/* 0x000f68000c1e1900 */
/*0100*/ LDG.E R12, [R4.64+0x50] ; /* 0x00005004040c7981 */
/* 0x000f68000c1e1900 */
/*0110*/ LDG.E R15, [R2.64+0x10] ; /* 0x00001004020f7981 */
/* 0x000f62000c1e1900 */
/*0120*/ FFMA R0, R0, R7, RZ ; /* 0x0000000700007223 */
/* 0x004fc800000000ff */
/*0130*/ FFMA R0, R9, R6, R0 ; /* 0x0000000609007223 */
/* 0x008fe40000000000 */
/*0140*/ IMAD R6, R14, c[0x0][0x0], R17 ; /* 0x000000000e067a24 */
/* 0x000fc800078e0211 */
/*0150*/ IMAD.WIDE R6, R6, R19, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc800078e0213 */
/*0160*/ FFMA R0, R11, R8, R0 ; /* 0x000000080b007223 */
/* 0x010fc80000000000 */
/*0170*/ FFMA R0, R13, R10, R0 ; /* 0x0000000a0d007223 */
/* 0x020fc80000000000 */
/*0180*/ FFMA R15, R15, R12, R0 ; /* 0x0000000c0f0f7223 */
/* 0x000fca0000000000 */
/*0190*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */
/* 0x000fe2000c101904 */
/*01a0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01b0*/ BRA 0x1b0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z7mat_subPfS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_TID.Y ; /* 0x0000000000037919 */
/* 0x000e240000002200 */
/*0050*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0060*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x000fc800078e0207 */
/*0070*/ IMAD.WIDE R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0207 */
/*0080*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0090*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*00a0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc800078e0207 */
/*00b0*/ FADD R9, R2, -R5 ; /* 0x8000000502097221 */
/* 0x004fca0000000000 */
/*00c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00e0*/ BRA 0xe0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z7mat_addPfS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_TID.Y ; /* 0x0000000000037919 */
/* 0x000e240000002200 */
/*0050*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0060*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x000fc800078e0207 */
/*0070*/ IMAD.WIDE R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0207 */
/*0080*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0090*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*00a0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc800078e0207 */
/*00b0*/ FADD R9, R2, R5 ; /* 0x0000000502097221 */
/* 0x004fca0000000000 */
/*00c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00e0*/ BRA 0xe0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z7mat_addPfS_S_
.globl _Z7mat_addPfS_S_
.p2align 8
.type _Z7mat_addPfS_S_,@function
; Elementwise matrix add: out[i] = a[i] + b[i], one thread per element.
; Flat index i = threadIdx.x * group_size_x + threadIdx.y (the group-size
; halfword comes from the hidden kernarg at offset 0x24).
_Z7mat_addPfS_S_:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24 ; hidden group-size word (x in low 16 bits)
s_load_b128 s[4:7], s[0:1], 0x0 ; s[4:5] = a, s[6:7] = b
v_and_b32_e32 v1, 0x3ff, v0 ; v1 = threadIdx.x (packed workitem id, bits 0-9)
v_bfe_u32 v0, v0, 10, 10 ; v0 = threadIdx.y (bits 10-19)
s_load_b64 s[0:1], s[0:1], 0x10 ; s[0:1] = out
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff ; keep group_size_x halfword
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_u32_u24_e32 v1, s2, v1 ; row offset = tid.x * group_size_x
v_add_lshl_u32 v0, v1, v0, 2 ; byte offset = (row + tid.y) * 4
s_clause 0x1
global_load_b32 v1, v0, s[4:5] ; v1 = a[i]
global_load_b32 v2, v0, s[6:7] ; v2 = b[i]
s_waitcnt vmcnt(0)
v_add_f32_e32 v1, v1, v2
global_store_b32 v0, v1, s[0:1] ; out[i] = a[i] + b[i]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z7mat_addPfS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 8
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z7mat_addPfS_S_, .Lfunc_end0-_Z7mat_addPfS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z7mat_subPfS_S_
.globl _Z7mat_subPfS_S_
.p2align 8
.type _Z7mat_subPfS_S_,@function
; Elementwise matrix subtract: out[i] = a[i] - b[i], one thread per element.
; Identical addressing to _Z7mat_addPfS_S_; only the float op differs.
_Z7mat_subPfS_S_:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24 ; hidden group-size word (x in low 16 bits)
s_load_b128 s[4:7], s[0:1], 0x0 ; s[4:5] = a, s[6:7] = b
v_and_b32_e32 v1, 0x3ff, v0 ; v1 = threadIdx.x
v_bfe_u32 v0, v0, 10, 10 ; v0 = threadIdx.y
s_load_b64 s[0:1], s[0:1], 0x10 ; s[0:1] = out
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_u32_u24_e32 v1, s2, v1 ; row offset = tid.x * group_size_x
v_add_lshl_u32 v0, v1, v0, 2 ; byte offset = (row + tid.y) * 4
s_clause 0x1
global_load_b32 v1, v0, s[4:5] ; v1 = a[i]
global_load_b32 v2, v0, s[6:7] ; v2 = b[i]
s_waitcnt vmcnt(0)
v_sub_f32_e32 v1, v1, v2
global_store_b32 v0, v1, s[0:1] ; out[i] = a[i] - b[i]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z7mat_subPfS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 8
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z7mat_subPfS_S_, .Lfunc_end1-_Z7mat_subPfS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z8mat_multPfS_S_
.globl _Z8mat_multPfS_S_
.p2align 8
.type _Z8mat_multPfS_S_,@function
; 5x5 matrix multiply: out[r][c] = sum_k a[r][k] * b[k][c], one thread per
; output element (r = threadIdx.x, c = threadIdx.y). The matrix dimension 5
; is hard-coded: row stride *5, column walk of 20 bytes, 5 loop iterations.
_Z8mat_multPfS_S_:
s_clause 0x1
s_load_b128 s[8:11], s[0:1], 0x0 ; s[8:9] = a, s[10:11] = b
s_load_b32 s4, s[0:1], 0x24 ; hidden group-size word (x in low 16 bits)
v_and_b32_e32 v2, 0x3ff, v0 ; v2 = threadIdx.x (row r)
v_bfe_u32 v3, v0, 10, 10 ; v3 = threadIdx.y (column c)
v_mov_b32_e32 v6, 0 ; v6 = dot-product accumulator
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_u32_u24_e32 v1, 5, v2 ; r * 5 (row start index in a)
v_lshlrev_b32_e32 v0, 2, v1 ; byte offset of a[r][0]
s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_lshlrev_b32_e32 v1, 2, v3 ; byte offset of b[0][c]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v4, s2, s8, v0 ; v[4:5] = &a[r][0]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_ci_u32_e64 v5, null, s9, 0, s2
v_add_co_u32 v0, s2, s10, v1 ; v[0:1] = &b[0][c]
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v1, null, s11, 0, s2
s_mov_b64 s[2:3], 0 ; k byte offset, steps 4 bytes/iter, 5 iters
.LBB2_1:
s_delay_alu instid0(SALU_CYCLE_1)
v_add_co_u32 v7, vcc_lo, v4, s2 ; v[7:8] = &a[r][k]
v_add_co_ci_u32_e32 v8, vcc_lo, s3, v5, vcc_lo
s_add_u32 s2, s2, 4
s_addc_u32 s3, s3, 0
global_load_b32 v9, v[0:1], off ; v9 = b[k][c]
global_load_b32 v7, v[7:8], off ; v7 = a[r][k]
v_add_co_u32 v0, vcc_lo, v0, 20 ; advance b by one row (5 floats)
v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
s_cmp_eq_u64 s[2:3], 20 ; done after k = 0..4
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v6, v7, v9 ; acc += a[r][k] * b[k][c]
s_cbranch_scc0 .LBB2_1
; Store out[r * group_size_x + c].
s_load_b64 s[0:1], s[0:1], 0x10 ; s[0:1] = out
s_and_b32 s2, 0xffff, s4 ; group_size_x halfword
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_u32_u24_e32 v0, s2, v2
v_add_lshl_u32 v0, v0, v3, 2 ; byte offset = (r*gsx + c) * 4
s_waitcnt lgkmcnt(0)
global_store_b32 v0, v6, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z8mat_multPfS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 10
.amdhsa_next_free_sgpr 12
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z8mat_multPfS_S_, .Lfunc_end2-_Z8mat_multPfS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z7mat_addPfS_S_
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z7mat_addPfS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z7mat_subPfS_S_
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z7mat_subPfS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z8mat_multPfS_S_
.private_segment_fixed_size: 0
.sgpr_count: 14
.sgpr_spill_count: 0
.symbol: _Z8mat_multPfS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 10
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
// %%cu
// as data type is int, sum might overflow (depending on rand(), but the seq and parallel answers are still equal, or change int to long long (too lazy sorry))
#define THREADS_PER_BLOCK 256
using namespace std;
// Per-block reduction kernel: reduces one block-sized slice of arr_in into
// arr_out[blockIdx.x].
//
//   option 0 or 3 -> sum        (identity 0)
//   option 1      -> maximum    (identity -INT_MAX)
//   otherwise     -> minimum    (identity  INT_MAX)
//
// Launch contract: dynamic shared memory of blockDim.x * sizeof(int) bytes;
// blockDim.x must be a power of two (the halving tree drops the element at
// index i whenever the stride is odd). Out-of-range lanes load the
// operation's identity so they cannot affect the result.
__global__ void calculate(int *arr_in, int* arr_out, int sz, int option){
    int ind = threadIdx.x;
    int dim = blockDim.x;
    extern __shared__ int shared_mem[];
    int actual_ind = blockIdx.x*blockDim.x + ind;
    // Stage this lane's element (or the identity) into shared memory.
    if(actual_ind < sz){
        shared_mem[ind] = arr_in[actual_ind];
    }else{
        if(option == 0 || option == 3)
            shared_mem[ind] = 0;
        else if(option == 1){//maximum
            shared_mem[ind] = -INT_MAX;
        }else{//minimum
            shared_mem[ind] = INT_MAX;
        }
    }
    __syncthreads();
    // Tree reduction: each pass folds the upper half onto the lower half.
    // The barrier sits outside the divergent `if` so every thread reaches it.
    for(int i=dim/2 ; i > 0 ; i=i/2){
        if(ind<i){
            if(option == 0 || option == 3)
                shared_mem[ind]+=shared_mem[ind+i];
            else if(option == 1){
                shared_mem[ind]=max(shared_mem[ind],shared_mem[ind+i]);
            }else{
                shared_mem[ind]=min(shared_mem[ind],shared_mem[ind+i]);
            }
        }
        __syncthreads();
    }
    // Publish once per block. (Previously every thread stored the same value
    // to arr_out[blockIdx.x] -- correct but redundant global traffic.)
    if(ind == 0)
        arr_out[blockIdx.x]=shared_mem[0];
}
Function : _Z9calculatePiS_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e220000002500 */
/*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0030*/ BSSY B0, 0x1b0 ; /* 0x0000017000007945 */
/* 0x000fe20003800000 */
/*0040*/ IMAD.MOV.U32 R8, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff087624 */
/* 0x000fe200078e00ff */
/*0050*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000e220000002100 */
/*0060*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff057624 */
/* 0x000fe400078e00ff */
/*0070*/ IMAD R2, R4, c[0x0][0x0], R7 ; /* 0x0000000004027a24 */
/* 0x001fe400078e0207 */
/*0080*/ IMAD.SHL.U32 R0, R7, 0x4, RZ ; /* 0x0000000407007824 */
/* 0x000fc600078e00ff */
/*0090*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x170], PT ; /* 0x00005c0002007a0c */
/* 0x000fda0003f06270 */
/*00a0*/ @!P0 BRA 0x160 ; /* 0x000000b000008947 */
/* 0x000fea0003800000 */
/*00b0*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x174] ; /* 0x00005d00ff027624 */
/* 0x000fe200078e00ff */
/*00c0*/ ISETP.NE.AND P0, PT, RZ, c[0x0][0x174], PT ; /* 0x00005d00ff007a0c */
/* 0x000fc80003f05270 */
/*00d0*/ ISETP.NE.AND P0, PT, R2, 0x3, P0 ; /* 0x000000030200780c */
/* 0x000fda0000705270 */
/*00e0*/ @!P0 STS [R7.X4], RZ ; /* 0x000000ff07008388 */
/* 0x0001e20000004800 */
/*00f0*/ @!P0 BRA 0x1a0 ; /* 0x000000a000008947 */
/* 0x000fea0003800000 */
/*0100*/ ISETP.NE.AND P0, PT, R2, 0x1, PT ; /* 0x000000010200780c */
/* 0x000fda0003f05270 */
/*0110*/ @P0 IMAD.MOV.U32 R2, RZ, RZ, 0x7fffffff ; /* 0x7fffffffff020424 */
/* 0x000fe200078e00ff */
/*0120*/ @!P0 MOV R6, 0x80000001 ; /* 0x8000000100068802 */
/* 0x000fc80000000f00 */
/*0130*/ @P0 STS [R7.X4], R2 ; /* 0x0000000207000388 */
/* 0x0003e80000004800 */
/*0140*/ @!P0 STS [R7.X4], R6 ; /* 0x0000000607008388 */
/* 0x0003e20000004800 */
/*0150*/ BRA 0x1a0 ; /* 0x0000004000007947 */
/* 0x000fea0003800000 */
/*0160*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fc800078e00ff */
/*0170*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fcc00078e0203 */
/*0180*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0190*/ STS [R7.X4], R2 ; /* 0x0000000207007388 */
/* 0x0041e40000004800 */
/*01a0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*01b0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*01c0*/ ISETP.GE.AND P0, PT, R8, 0x2, PT ; /* 0x000000020800780c */
/* 0x000fda0003f06270 */
/*01d0*/ @!P0 BRA 0x380 ; /* 0x000001a000008947 */
/* 0x000fea0003800000 */
/*01e0*/ LEA.HI R2, R5, R5, RZ, 0x1 ; /* 0x0000000505027211 */
/* 0x003fe200078f08ff */
/*01f0*/ IMAD.MOV.U32 R6, RZ, RZ, R5 ; /* 0x000000ffff067224 */
/* 0x000fe200078e0005 */
/*0200*/ BSSY B0, 0x350 ; /* 0x0000014000007945 */
/* 0x000fe40003800000 */
/*0210*/ SHF.R.S32.HI R5, RZ, 0x1, R2 ; /* 0x00000001ff057819 */
/* 0x000fc80000011402 */
/*0220*/ ISETP.GE.AND P0, PT, R7, R5, PT ; /* 0x000000050700720c */
/* 0x000fda0003f06270 */
/*0230*/ @P0 BRA 0x340 ; /* 0x0000010000000947 */
/* 0x000fea0003800000 */
/*0240*/ IMAD.MOV.U32 R10, RZ, RZ, c[0x0][0x174] ; /* 0x00005d00ff0a7624 */
/* 0x000fe200078e00ff */
/*0250*/ ISETP.NE.AND P0, PT, RZ, c[0x0][0x174], PT ; /* 0x00005d00ff007a0c */
/* 0x000fe40003f05270 */
/*0260*/ LEA R8, R5, R0, 0x2 ; /* 0x0000000005087211 */
/* 0x000fe400078e10ff */
/*0270*/ ISETP.NE.AND P0, PT, R10, 0x3, P0 ; /* 0x000000030a00780c */
/* 0x000fda0000705270 */
/*0280*/ @!P0 LDS R3, [R7.X4] ; /* 0x0000000007038984 */
/* 0x000fe80000004800 */
/*0290*/ @!P0 LDS R2, [R8] ; /* 0x0000000008028984 */
/* 0x000e240000000800 */
/*02a0*/ @!P0 IMAD.IADD R2, R2, 0x1, R3 ; /* 0x0000000102028824 */
/* 0x001fca00078e0203 */
/*02b0*/ @!P0 STS [R7.X4], R2 ; /* 0x0000000207008388 */
/* 0x0001e20000004800 */
/*02c0*/ @!P0 BRA 0x340 ; /* 0x0000007000008947 */
/* 0x000fea0003800000 */
/*02d0*/ LDS R9, [R8] ; /* 0x0000000008097984 */
/* 0x000fe20000000800 */
/*02e0*/ ISETP.NE.AND P0, PT, R10, 0x1, PT ; /* 0x000000010a00780c */
/* 0x000fc60003f05270 */
/*02f0*/ LDS R2, [R7.X4] ; /* 0x0000000007027984 */
/* 0x001e340000004800 */
/*0300*/ @P0 IMNMX R3, R9, R2, PT ; /* 0x0000000209030217 */
/* 0x001fc40003800200 */
/*0310*/ @!P0 IMNMX R2, R9, R2, !PT ; /* 0x0000000209028217 */
/* 0x000fc60007800200 */
/*0320*/ @P0 STS [R7.X4], R3 ; /* 0x0000000307000388 */
/* 0x0001e80000004800 */
/*0330*/ @!P0 STS [R7.X4], R2 ; /* 0x0000000207008388 */
/* 0x0001e40000004800 */
/*0340*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0350*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0360*/ ISETP.GT.AND P0, PT, R6, 0x3, PT ; /* 0x000000030600780c */
/* 0x000fda0003f04270 */
/*0370*/ @P0 BRA 0x1e0 ; /* 0xfffffe6000000947 */
/* 0x000fea000383ffff */
/*0380*/ LDS R5, [RZ] ; /* 0x00000000ff057984 */
/* 0x000ea20000000800 */
/*0390*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x001fc800078e00ff */
/*03a0*/ IMAD.WIDE.U32 R2, R4, R3, c[0x0][0x168] ; /* 0x00005a0004027625 */
/* 0x002fca00078e0003 */
/*03b0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x004fe2000c101904 */
/*03c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*03d0*/ BRA 0x3d0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*03e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0400*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0410*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0420*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0430*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0440*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0450*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0460*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0470*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
// %%cu
// as data type is int, sum might overflow (depending on rand(), but the seq and parallel answers are still equal, or change int to long long (too lazy sorry))
#define THREADS_PER_BLOCK 256
using namespace std;
// Per-block reduction kernel: reduces one block-sized slice of arr_in into
// arr_out[blockIdx.x].
//
//   option 0 or 3 -> sum        (identity 0)
//   option 1      -> maximum    (identity -INT_MAX)
//   otherwise     -> minimum    (identity  INT_MAX)
//
// Launch contract: dynamic shared memory of blockDim.x * sizeof(int) bytes;
// blockDim.x must be a power of two (the halving tree drops the element at
// index i whenever the stride is odd). Out-of-range lanes load the
// operation's identity so they cannot affect the result.
__global__ void calculate(int *arr_in, int* arr_out, int sz, int option){
    int ind = threadIdx.x;
    int dim = blockDim.x;
    extern __shared__ int shared_mem[];
    int actual_ind = blockIdx.x*blockDim.x + ind;
    // Stage this lane's element (or the identity) into shared memory.
    if(actual_ind < sz){
        shared_mem[ind] = arr_in[actual_ind];
    }else{
        if(option == 0 || option == 3)
            shared_mem[ind] = 0;
        else if(option == 1){//maximum
            shared_mem[ind] = -INT_MAX;
        }else{//minimum
            shared_mem[ind] = INT_MAX;
        }
    }
    __syncthreads();
    // Tree reduction: each pass folds the upper half onto the lower half.
    // The barrier sits outside the divergent `if` so every thread reaches it.
    for(int i=dim/2 ; i > 0 ; i=i/2){
        if(ind<i){
            if(option == 0 || option == 3)
                shared_mem[ind]+=shared_mem[ind+i];
            else if(option == 1){
                shared_mem[ind]=max(shared_mem[ind],shared_mem[ind+i]);
            }else{
                shared_mem[ind]=min(shared_mem[ind],shared_mem[ind+i]);
            }
        }
        __syncthreads();
    }
    // Publish once per block. (Previously every thread stored the same value
    // to arr_out[blockIdx.x] -- correct but redundant global traffic.)
    if(ind == 0)
        arr_out[blockIdx.x]=shared_mem[0];
}
} | .file "tmpxft_0008c4a7_00000000-6_calculate.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z32__device_stub__Z9calculatePiS_iiPiS_ii
.type _Z32__device_stub__Z9calculatePiS_iiPiS_ii, @function
_Z32__device_stub__Z9calculatePiS_iiPiS_ii:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9calculatePiS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z32__device_stub__Z9calculatePiS_iiPiS_ii, .-_Z32__device_stub__Z9calculatePiS_iiPiS_ii
.globl _Z9calculatePiS_ii
.type _Z9calculatePiS_ii, @function
_Z9calculatePiS_ii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z9calculatePiS_iiPiS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z9calculatePiS_ii, .-_Z9calculatePiS_ii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z9calculatePiS_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z9calculatePiS_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
// %%cu
// as data type is int, sum might overflow (depending on rand(), but the seq and parallel answers are still equal, or change int to long long (too lazy sorry))
#define THREADS_PER_BLOCK 256
using namespace std;
// Block-wise parallel reduction kernel (duplicate copy kept in sync with its
// paired assembly). One block reduces one blockDim.x-sized chunk of arr_in and
// writes one partial result to arr_out[blockIdx.x]; partials are combined later.
//
// option selects the operator: 0 or 3 -> sum, 1 -> max, anything else -> min.
// Requires blockDim.x * sizeof(int) bytes of dynamic shared memory and
// gridDim.x slots in arr_out.
// NOTE(review): the halving loop assumes blockDim.x is a power of two
// (odd intermediate widths drop an element) — confirm launch configuration.
__global__ void calculate(int *arr_in, int* arr_out, int sz, int option){
int ind = threadIdx.x;
int dim = blockDim.x;
// One int of dynamically sized shared scratch per thread.
extern __shared__ int shared_mem[];
int actual_ind = blockIdx.x*blockDim.x + ind;
if(actual_ind < sz){
shared_mem[ind] = arr_in[actual_ind];
}else{
// Out-of-range threads seed the operator's identity element.
if(option == 0 || option == 3)
shared_mem[ind] = 0;
else if(option == 1){//maximum
// -INT_MAX (INT_MIN + 1) used as the max identity, not INT_MIN.
shared_mem[ind] = -INT_MAX;
}else{//minimum
shared_mem[ind] = INT_MAX;
}
}
// Ensure every thread's load is visible before reduction reads begin.
__syncthreads();
// Tree reduction: fold the upper half into the lower half each pass until
// shared_mem[0] holds the block's result.
for(int i=dim/2 ; i > 0 ; i=i/2){
if(ind<i){
if(option == 0 || option == 3)
shared_mem[ind]+=shared_mem[ind+i];
else if(option == 1){
shared_mem[ind]=max(shared_mem[ind],shared_mem[ind+i]);
}else{
shared_mem[ind]=min(shared_mem[ind],shared_mem[ind+i]);
}
}
// Barrier is outside the divergent if(ind<i), so all threads reach it.
__syncthreads();
}
// All threads redundantly store the same final value — well defined.
arr_out[blockIdx.x]=shared_mem[0];
} | #include <hip/hip_runtime.h>
#include "includes.h"
// %%cu
// as data type is int, sum might overflow (depending on rand(), but the seq and parallel answers are still equal, or change int to long long (too lazy sorry))
#define THREADS_PER_BLOCK 256
using namespace std;
// Block-wise parallel reduction kernel (HIP). One thread block reduces one
// blockDim.x-sized chunk of arr_in and writes a single partial result to
// arr_out[blockIdx.x]; the per-block partials must be combined afterwards.
//
// option selects the operator: 0 or 3 -> sum, 1 -> max, anything else -> min.
// Requires blockDim.x * sizeof(int) bytes of dynamic shared memory and
// gridDim.x slots in arr_out.
// NOTE(review): the halving loop (i = dim/2, i = i/2) drops an element at any
// odd intermediate width — presumably blockDim.x is always a power of two
// (THREADS_PER_BLOCK = 256); confirm.
__global__ void calculate(int *arr_in, int* arr_out, int sz, int option){
int ind = threadIdx.x;
int dim = blockDim.x;
// Dynamically sized shared scratch, one int per thread in the block.
extern __shared__ int shared_mem[];
int actual_ind = blockIdx.x*blockDim.x + ind;
if(actual_ind < sz){
shared_mem[ind] = arr_in[actual_ind];
}else{
// Threads past the end of arr_in seed the operator's identity element.
if(option == 0 || option == 3)
shared_mem[ind] = 0;
else if(option == 1){//maximum
// -INT_MAX (== INT_MIN + 1) is the chosen max identity, not INT_MIN.
shared_mem[ind] = -INT_MAX;
}else{//minimum
shared_mem[ind] = INT_MAX;
}
}
// All shared-memory loads must complete before reduction reads start.
__syncthreads();
// Tree reduction: fold the upper half of the live range into the lower half
// each pass until shared_mem[0] holds the block's result.
for(int i=dim/2 ; i > 0 ; i=i/2){
if(ind<i){
if(option == 0 || option == 3)
shared_mem[ind]+=shared_mem[ind+i];
else if(option == 1){
shared_mem[ind]=max(shared_mem[ind],shared_mem[ind+i]);
}else{
shared_mem[ind]=min(shared_mem[ind],shared_mem[ind+i]);
}
}
// Barrier sits outside the if(ind<i) guard — every thread reaches it.
__syncthreads();
}
// Every thread redundantly stores the same shared_mem[0] value.
arr_out[blockIdx.x]=shared_mem[0];
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
// %%cu
// as data type is int, sum might overflow (depending on rand(), but the seq and parallel answers are still equal, or change int to long long (too lazy sorry))
#define THREADS_PER_BLOCK 256
using namespace std;
// Block-wise parallel reduction kernel (HIP copy paired with the AMDGCN
// assembly that follows). One block reduces one blockDim.x-sized chunk of
// arr_in into arr_out[blockIdx.x]; partials are combined by a later pass.
//
// option: 0 or 3 -> sum, 1 -> max, anything else -> min.
// Needs blockDim.x * sizeof(int) bytes of dynamic shared memory and
// gridDim.x entries in arr_out.
// NOTE(review): halving loop assumes a power-of-two blockDim.x (odd widths
// drop an element) — confirm launch configuration.
__global__ void calculate(int *arr_in, int* arr_out, int sz, int option){
int ind = threadIdx.x;
int dim = blockDim.x;
// One int of dynamic shared scratch per thread.
extern __shared__ int shared_mem[];
int actual_ind = blockIdx.x*blockDim.x + ind;
if(actual_ind < sz){
shared_mem[ind] = arr_in[actual_ind];
}else{
// Out-of-range threads seed the operator's identity element.
if(option == 0 || option == 3)
shared_mem[ind] = 0;
else if(option == 1){//maximum
// -INT_MAX (INT_MIN + 1) serves as the max identity, not INT_MIN.
shared_mem[ind] = -INT_MAX;
}else{//minimum
shared_mem[ind] = INT_MAX;
}
}
// Make all loads visible block-wide before the reduction reads them.
__syncthreads();
// Tree reduction: each pass folds the upper half into the lower half until
// shared_mem[0] holds the block result.
for(int i=dim/2 ; i > 0 ; i=i/2){
if(ind<i){
if(option == 0 || option == 3)
shared_mem[ind]+=shared_mem[ind+i];
else if(option == 1){
shared_mem[ind]=max(shared_mem[ind],shared_mem[ind+i]);
}else{
shared_mem[ind]=min(shared_mem[ind],shared_mem[ind+i]);
}
}
// Barrier is outside the divergent guard, so all threads reach it.
__syncthreads();
}
// All threads store the same final value; redundant but well defined.
arr_out[blockIdx.x]=shared_mem[0];
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9calculatePiS_ii
.globl _Z9calculatePiS_ii
.p2align 8
.type _Z9calculatePiS_ii,@function
_Z9calculatePiS_ii:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x24
s_load_b64 s[4:5], s[0:1], 0x10
s_mov_b32 s2, s15
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s2, s3, v[0:1]
v_cmp_le_i32_e32 vcc_lo, s4, v1
s_and_saveexec_b32 s4, vcc_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_xor_b32 s4, exec_lo, s4
s_cbranch_execz .LBB0_14
s_cmp_lt_i32 s5, 1
s_cbranch_scc1 .LBB0_4
s_cmp_gt_i32 s5, 2
s_cbranch_scc0 .LBB0_5
s_cmp_lg_u32 s5, 3
s_mov_b32 s6, 0
s_cselect_b32 s7, -1, 0
s_cbranch_execz .LBB0_6
s_branch .LBB0_9
.LBB0_4:
s_mov_b32 s7, 0
s_cbranch_execnz .LBB0_10
s_branch .LBB0_11
.LBB0_5:
s_mov_b32 s7, 0
.LBB0_6:
s_cmp_eq_u32 s5, 1
s_cbranch_scc0 .LBB0_8
s_mov_b32 s7, 0
s_mov_b32 s6, 0x80000001
s_branch .LBB0_9
.LBB0_8:
s_mov_b32 s7, -1
.LBB0_9:
s_branch .LBB0_11
.LBB0_10:
s_cmp_lg_u32 s5, 0
s_mov_b32 s6, 0
s_cselect_b32 s7, -1, 0
.LBB0_11:
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 vcc_lo, exec_lo, s7
s_cbranch_vccnz .LBB0_13
s_brev_b32 s6, -2
.LBB0_13:
.LBB0_14:
s_or_saveexec_b32 s4, s4
v_mov_b32_e32 v2, s6
s_xor_b32 exec_lo, exec_lo, s4
s_cbranch_execz .LBB0_16
s_load_b64 s[6:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v1, vcc_lo, s6, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, s7, v2, vcc_lo
global_load_b32 v2, v[1:2], off
.LBB0_16:
s_or_b32 exec_lo, exec_lo, s4
v_lshl_add_u32 v1, v0, 2, 0
s_cmp_lt_u32 s3, 2
s_waitcnt vmcnt(0)
ds_store_b32 v1, v2
s_waitcnt lgkmcnt(0)
s_barrier
s_branch .LBB0_18
.LBB0_17:
s_or_b32 exec_lo, exec_lo, s6
s_waitcnt lgkmcnt(0)
s_barrier
s_cmp_lt_u32 s3, 4
s_mov_b32 s3, s4
.LBB0_18:
buffer_gl0_inv
s_cbranch_scc1 .LBB0_34
s_lshr_b32 s4, s3, 1
s_mov_b32 s6, exec_lo
v_cmpx_gt_u32_e64 s4, v0
s_cbranch_execz .LBB0_17
s_cmp_lt_i32 s5, 1
s_cbranch_scc1 .LBB0_23
s_cmp_gt_i32 s5, 2
s_cbranch_scc0 .LBB0_24
s_cmp_lg_u32 s5, 3
s_mov_b32 s7, -1
s_cselect_b32 s8, -1, 0
s_cbranch_execz .LBB0_25
s_branch .LBB0_27
.LBB0_23:
s_mov_b32 s9, -1
s_mov_b32 s7, 0
s_mov_b32 s8, 0
s_branch .LBB0_28
.LBB0_24:
s_mov_b32 s7, 0
s_mov_b32 s8, 0
.LBB0_25:
s_cmp_eq_u32 s5, 1
s_mov_b32 s8, -1
s_cbranch_scc0 .LBB0_27
v_add_nc_u32_e32 v2, s4, v0
s_mov_b32 s8, 0
s_mov_b32 s9, 0
s_delay_alu instid0(VALU_DEP_1)
v_lshl_add_u32 v2, v2, 2, 0
ds_load_b32 v3, v1
ds_load_b32 v2, v2
s_waitcnt lgkmcnt(0)
v_max_i32_e32 v2, v3, v2
ds_store_b32 v1, v2
s_branch .LBB0_28
.LBB0_27:
s_mov_b32 s9, 0
.LBB0_28:
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 vcc_lo, exec_lo, s9
s_cbranch_vccz .LBB0_30
s_cmp_eq_u32 s5, 0
s_cselect_b32 s7, -1, 0
s_cmp_lg_u32 s5, 0
s_cselect_b32 s8, -1, 0
.LBB0_30:
v_add_nc_u32_e32 v2, s4, v0
s_and_not1_b32 vcc_lo, exec_lo, s8
s_cbranch_vccnz .LBB0_32
s_delay_alu instid0(VALU_DEP_1)
v_lshl_add_u32 v3, v2, 2, 0
s_mov_b32 s7, 0
ds_load_b32 v4, v1
ds_load_b32 v3, v3
s_waitcnt lgkmcnt(0)
v_min_i32_e32 v3, v4, v3
ds_store_b32 v1, v3
.LBB0_32:
s_and_not1_b32 vcc_lo, exec_lo, s7
s_cbranch_vccnz .LBB0_17
v_lshl_add_u32 v2, v2, 2, 0
ds_load_b32 v2, v2
ds_load_b32 v3, v1
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
ds_store_b32 v1, v2
s_branch .LBB0_17
.LBB0_34:
v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0
s_load_b64 s[0:1], s[0:1], 0x8
s_mov_b32 s3, 0
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[2:3], s[2:3], 2
ds_load_b32 v0, v0
s_waitcnt lgkmcnt(0)
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
global_store_b32 v1, v0, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9calculatePiS_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9calculatePiS_ii, .Lfunc_end0-_Z9calculatePiS_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
- .offset: 144
.size: 4
.value_kind: hidden_dynamic_lds_size
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9calculatePiS_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9calculatePiS_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
// %%cu
// as data type is int, sum might overflow (depending on rand(), but the seq and parallel answers are still equal, or change int to long long (too lazy sorry))
#define THREADS_PER_BLOCK 256
using namespace std;
// Block-wise parallel reduction kernel (HIP copy paired with the x86-64 host
// assembly that follows). One block reduces one blockDim.x-sized chunk of
// arr_in into arr_out[blockIdx.x]; per-block partials are combined later.
//
// option: 0 or 3 -> sum, 1 -> max, anything else -> min.
// Needs blockDim.x * sizeof(int) bytes of dynamic shared memory and
// gridDim.x entries in arr_out.
// NOTE(review): halving loop assumes a power-of-two blockDim.x (odd widths
// drop an element) — confirm launch configuration.
__global__ void calculate(int *arr_in, int* arr_out, int sz, int option){
int ind = threadIdx.x;
int dim = blockDim.x;
// One int of dynamic shared scratch per thread in the block.
extern __shared__ int shared_mem[];
int actual_ind = blockIdx.x*blockDim.x + ind;
if(actual_ind < sz){
shared_mem[ind] = arr_in[actual_ind];
}else{
// Out-of-range threads seed the operator's identity element.
if(option == 0 || option == 3)
shared_mem[ind] = 0;
else if(option == 1){//maximum
// -INT_MAX (INT_MIN + 1) is used as the max identity, not INT_MIN.
shared_mem[ind] = -INT_MAX;
}else{//minimum
shared_mem[ind] = INT_MAX;
}
}
// All loads into shared memory must finish before reduction reads begin.
__syncthreads();
// Tree reduction: fold the upper half into the lower half each pass until
// shared_mem[0] holds the block result.
for(int i=dim/2 ; i > 0 ; i=i/2){
if(ind<i){
if(option == 0 || option == 3)
shared_mem[ind]+=shared_mem[ind+i];
else if(option == 1){
shared_mem[ind]=max(shared_mem[ind],shared_mem[ind+i]);
}else{
shared_mem[ind]=min(shared_mem[ind],shared_mem[ind+i]);
}
}
// Barrier sits outside the if(ind<i) guard — every thread reaches it.
__syncthreads();
}
// Every thread redundantly stores the same final value — well defined.
arr_out[blockIdx.x]=shared_mem[0];
} | .text
.file "calculate.hip"
.globl _Z24__device_stub__calculatePiS_ii # -- Begin function _Z24__device_stub__calculatePiS_ii
.p2align 4, 0x90
.type _Z24__device_stub__calculatePiS_ii,@function
_Z24__device_stub__calculatePiS_ii: # @_Z24__device_stub__calculatePiS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9calculatePiS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z24__device_stub__calculatePiS_ii, .Lfunc_end0-_Z24__device_stub__calculatePiS_ii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9calculatePiS_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9calculatePiS_ii,@object # @_Z9calculatePiS_ii
.section .rodata,"a",@progbits
.globl _Z9calculatePiS_ii
.p2align 3, 0x0
_Z9calculatePiS_ii:
.quad _Z24__device_stub__calculatePiS_ii
.size _Z9calculatePiS_ii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9calculatePiS_ii"
.size .L__unnamed_1, 19
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__calculatePiS_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9calculatePiS_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z9calculatePiS_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e220000002500 */
/*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0030*/ BSSY B0, 0x1b0 ; /* 0x0000017000007945 */
/* 0x000fe20003800000 */
/*0040*/ IMAD.MOV.U32 R8, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff087624 */
/* 0x000fe200078e00ff */
/*0050*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000e220000002100 */
/*0060*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff057624 */
/* 0x000fe400078e00ff */
/*0070*/ IMAD R2, R4, c[0x0][0x0], R7 ; /* 0x0000000004027a24 */
/* 0x001fe400078e0207 */
/*0080*/ IMAD.SHL.U32 R0, R7, 0x4, RZ ; /* 0x0000000407007824 */
/* 0x000fc600078e00ff */
/*0090*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x170], PT ; /* 0x00005c0002007a0c */
/* 0x000fda0003f06270 */
/*00a0*/ @!P0 BRA 0x160 ; /* 0x000000b000008947 */
/* 0x000fea0003800000 */
/*00b0*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x174] ; /* 0x00005d00ff027624 */
/* 0x000fe200078e00ff */
/*00c0*/ ISETP.NE.AND P0, PT, RZ, c[0x0][0x174], PT ; /* 0x00005d00ff007a0c */
/* 0x000fc80003f05270 */
/*00d0*/ ISETP.NE.AND P0, PT, R2, 0x3, P0 ; /* 0x000000030200780c */
/* 0x000fda0000705270 */
/*00e0*/ @!P0 STS [R7.X4], RZ ; /* 0x000000ff07008388 */
/* 0x0001e20000004800 */
/*00f0*/ @!P0 BRA 0x1a0 ; /* 0x000000a000008947 */
/* 0x000fea0003800000 */
/*0100*/ ISETP.NE.AND P0, PT, R2, 0x1, PT ; /* 0x000000010200780c */
/* 0x000fda0003f05270 */
/*0110*/ @P0 IMAD.MOV.U32 R2, RZ, RZ, 0x7fffffff ; /* 0x7fffffffff020424 */
/* 0x000fe200078e00ff */
/*0120*/ @!P0 MOV R6, 0x80000001 ; /* 0x8000000100068802 */
/* 0x000fc80000000f00 */
/*0130*/ @P0 STS [R7.X4], R2 ; /* 0x0000000207000388 */
/* 0x0003e80000004800 */
/*0140*/ @!P0 STS [R7.X4], R6 ; /* 0x0000000607008388 */
/* 0x0003e20000004800 */
/*0150*/ BRA 0x1a0 ; /* 0x0000004000007947 */
/* 0x000fea0003800000 */
/*0160*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fc800078e00ff */
/*0170*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fcc00078e0203 */
/*0180*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0190*/ STS [R7.X4], R2 ; /* 0x0000000207007388 */
/* 0x0041e40000004800 */
/*01a0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*01b0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*01c0*/ ISETP.GE.AND P0, PT, R8, 0x2, PT ; /* 0x000000020800780c */
/* 0x000fda0003f06270 */
/*01d0*/ @!P0 BRA 0x380 ; /* 0x000001a000008947 */
/* 0x000fea0003800000 */
/*01e0*/ LEA.HI R2, R5, R5, RZ, 0x1 ; /* 0x0000000505027211 */
/* 0x003fe200078f08ff */
/*01f0*/ IMAD.MOV.U32 R6, RZ, RZ, R5 ; /* 0x000000ffff067224 */
/* 0x000fe200078e0005 */
/*0200*/ BSSY B0, 0x350 ; /* 0x0000014000007945 */
/* 0x000fe40003800000 */
/*0210*/ SHF.R.S32.HI R5, RZ, 0x1, R2 ; /* 0x00000001ff057819 */
/* 0x000fc80000011402 */
/*0220*/ ISETP.GE.AND P0, PT, R7, R5, PT ; /* 0x000000050700720c */
/* 0x000fda0003f06270 */
/*0230*/ @P0 BRA 0x340 ; /* 0x0000010000000947 */
/* 0x000fea0003800000 */
/*0240*/ IMAD.MOV.U32 R10, RZ, RZ, c[0x0][0x174] ; /* 0x00005d00ff0a7624 */
/* 0x000fe200078e00ff */
/*0250*/ ISETP.NE.AND P0, PT, RZ, c[0x0][0x174], PT ; /* 0x00005d00ff007a0c */
/* 0x000fe40003f05270 */
/*0260*/ LEA R8, R5, R0, 0x2 ; /* 0x0000000005087211 */
/* 0x000fe400078e10ff */
/*0270*/ ISETP.NE.AND P0, PT, R10, 0x3, P0 ; /* 0x000000030a00780c */
/* 0x000fda0000705270 */
/*0280*/ @!P0 LDS R3, [R7.X4] ; /* 0x0000000007038984 */
/* 0x000fe80000004800 */
/*0290*/ @!P0 LDS R2, [R8] ; /* 0x0000000008028984 */
/* 0x000e240000000800 */
/*02a0*/ @!P0 IMAD.IADD R2, R2, 0x1, R3 ; /* 0x0000000102028824 */
/* 0x001fca00078e0203 */
/*02b0*/ @!P0 STS [R7.X4], R2 ; /* 0x0000000207008388 */
/* 0x0001e20000004800 */
/*02c0*/ @!P0 BRA 0x340 ; /* 0x0000007000008947 */
/* 0x000fea0003800000 */
/*02d0*/ LDS R9, [R8] ; /* 0x0000000008097984 */
/* 0x000fe20000000800 */
/*02e0*/ ISETP.NE.AND P0, PT, R10, 0x1, PT ; /* 0x000000010a00780c */
/* 0x000fc60003f05270 */
/*02f0*/ LDS R2, [R7.X4] ; /* 0x0000000007027984 */
/* 0x001e340000004800 */
/*0300*/ @P0 IMNMX R3, R9, R2, PT ; /* 0x0000000209030217 */
/* 0x001fc40003800200 */
/*0310*/ @!P0 IMNMX R2, R9, R2, !PT ; /* 0x0000000209028217 */
/* 0x000fc60007800200 */
/*0320*/ @P0 STS [R7.X4], R3 ; /* 0x0000000307000388 */
/* 0x0001e80000004800 */
/*0330*/ @!P0 STS [R7.X4], R2 ; /* 0x0000000207008388 */
/* 0x0001e40000004800 */
/*0340*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0350*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0360*/ ISETP.GT.AND P0, PT, R6, 0x3, PT ; /* 0x000000030600780c */
/* 0x000fda0003f04270 */
/*0370*/ @P0 BRA 0x1e0 ; /* 0xfffffe6000000947 */
/* 0x000fea000383ffff */
/*0380*/ LDS R5, [RZ] ; /* 0x00000000ff057984 */
/* 0x000ea20000000800 */
/*0390*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x001fc800078e00ff */
/*03a0*/ IMAD.WIDE.U32 R2, R4, R3, c[0x0][0x168] ; /* 0x00005a0004027625 */
/* 0x002fca00078e0003 */
/*03b0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x004fe2000c101904 */
/*03c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*03d0*/ BRA 0x3d0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*03e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0400*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0410*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0420*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0430*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0440*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0450*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0460*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0470*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9calculatePiS_ii
.globl _Z9calculatePiS_ii
.p2align 8
.type _Z9calculatePiS_ii,@function
_Z9calculatePiS_ii:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x24
s_load_b64 s[4:5], s[0:1], 0x10
s_mov_b32 s2, s15
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s2, s3, v[0:1]
v_cmp_le_i32_e32 vcc_lo, s4, v1
s_and_saveexec_b32 s4, vcc_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_xor_b32 s4, exec_lo, s4
s_cbranch_execz .LBB0_14
s_cmp_lt_i32 s5, 1
s_cbranch_scc1 .LBB0_4
s_cmp_gt_i32 s5, 2
s_cbranch_scc0 .LBB0_5
s_cmp_lg_u32 s5, 3
s_mov_b32 s6, 0
s_cselect_b32 s7, -1, 0
s_cbranch_execz .LBB0_6
s_branch .LBB0_9
.LBB0_4:
s_mov_b32 s7, 0
s_cbranch_execnz .LBB0_10
s_branch .LBB0_11
.LBB0_5:
s_mov_b32 s7, 0
.LBB0_6:
s_cmp_eq_u32 s5, 1
s_cbranch_scc0 .LBB0_8
s_mov_b32 s7, 0
s_mov_b32 s6, 0x80000001
s_branch .LBB0_9
.LBB0_8:
s_mov_b32 s7, -1
.LBB0_9:
s_branch .LBB0_11
.LBB0_10:
s_cmp_lg_u32 s5, 0
s_mov_b32 s6, 0
s_cselect_b32 s7, -1, 0
.LBB0_11:
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 vcc_lo, exec_lo, s7
s_cbranch_vccnz .LBB0_13
s_brev_b32 s6, -2
.LBB0_13:
.LBB0_14:
s_or_saveexec_b32 s4, s4
v_mov_b32_e32 v2, s6
s_xor_b32 exec_lo, exec_lo, s4
s_cbranch_execz .LBB0_16
s_load_b64 s[6:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v1, vcc_lo, s6, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, s7, v2, vcc_lo
global_load_b32 v2, v[1:2], off
.LBB0_16:
s_or_b32 exec_lo, exec_lo, s4
v_lshl_add_u32 v1, v0, 2, 0
s_cmp_lt_u32 s3, 2
s_waitcnt vmcnt(0)
ds_store_b32 v1, v2
s_waitcnt lgkmcnt(0)
s_barrier
s_branch .LBB0_18
.LBB0_17:
s_or_b32 exec_lo, exec_lo, s6
s_waitcnt lgkmcnt(0)
s_barrier
s_cmp_lt_u32 s3, 4
s_mov_b32 s3, s4
.LBB0_18:
buffer_gl0_inv
s_cbranch_scc1 .LBB0_34
s_lshr_b32 s4, s3, 1
s_mov_b32 s6, exec_lo
v_cmpx_gt_u32_e64 s4, v0
s_cbranch_execz .LBB0_17
s_cmp_lt_i32 s5, 1
s_cbranch_scc1 .LBB0_23
s_cmp_gt_i32 s5, 2
s_cbranch_scc0 .LBB0_24
s_cmp_lg_u32 s5, 3
s_mov_b32 s7, -1
s_cselect_b32 s8, -1, 0
s_cbranch_execz .LBB0_25
s_branch .LBB0_27
.LBB0_23:
s_mov_b32 s9, -1
s_mov_b32 s7, 0
s_mov_b32 s8, 0
s_branch .LBB0_28
.LBB0_24:
s_mov_b32 s7, 0
s_mov_b32 s8, 0
.LBB0_25:
s_cmp_eq_u32 s5, 1
s_mov_b32 s8, -1
s_cbranch_scc0 .LBB0_27
v_add_nc_u32_e32 v2, s4, v0
s_mov_b32 s8, 0
s_mov_b32 s9, 0
s_delay_alu instid0(VALU_DEP_1)
v_lshl_add_u32 v2, v2, 2, 0
ds_load_b32 v3, v1
ds_load_b32 v2, v2
s_waitcnt lgkmcnt(0)
v_max_i32_e32 v2, v3, v2
ds_store_b32 v1, v2
s_branch .LBB0_28
.LBB0_27:
s_mov_b32 s9, 0
.LBB0_28:
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 vcc_lo, exec_lo, s9
s_cbranch_vccz .LBB0_30
s_cmp_eq_u32 s5, 0
s_cselect_b32 s7, -1, 0
s_cmp_lg_u32 s5, 0
s_cselect_b32 s8, -1, 0
.LBB0_30:
v_add_nc_u32_e32 v2, s4, v0
s_and_not1_b32 vcc_lo, exec_lo, s8
s_cbranch_vccnz .LBB0_32
s_delay_alu instid0(VALU_DEP_1)
v_lshl_add_u32 v3, v2, 2, 0
s_mov_b32 s7, 0
ds_load_b32 v4, v1
ds_load_b32 v3, v3
s_waitcnt lgkmcnt(0)
v_min_i32_e32 v3, v4, v3
ds_store_b32 v1, v3
.LBB0_32:
s_and_not1_b32 vcc_lo, exec_lo, s7
s_cbranch_vccnz .LBB0_17
v_lshl_add_u32 v2, v2, 2, 0
ds_load_b32 v2, v2
ds_load_b32 v3, v1
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
ds_store_b32 v1, v2
s_branch .LBB0_17
.LBB0_34:
v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0
s_load_b64 s[0:1], s[0:1], 0x8
s_mov_b32 s3, 0
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[2:3], s[2:3], 2
ds_load_b32 v0, v0
s_waitcnt lgkmcnt(0)
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
global_store_b32 v1, v0, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9calculatePiS_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9calculatePiS_ii, .Lfunc_end0-_Z9calculatePiS_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
- .offset: 144
.size: 4
.value_kind: hidden_dynamic_lds_size
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9calculatePiS_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9calculatePiS_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0008c4a7_00000000-6_calculate.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z32__device_stub__Z9calculatePiS_iiPiS_ii
.type _Z32__device_stub__Z9calculatePiS_iiPiS_ii, @function
_Z32__device_stub__Z9calculatePiS_iiPiS_ii:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9calculatePiS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z32__device_stub__Z9calculatePiS_iiPiS_ii, .-_Z32__device_stub__Z9calculatePiS_iiPiS_ii
.globl _Z9calculatePiS_ii
.type _Z9calculatePiS_ii, @function
_Z9calculatePiS_ii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z9calculatePiS_iiPiS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z9calculatePiS_ii, .-_Z9calculatePiS_ii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z9calculatePiS_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z9calculatePiS_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "calculate.hip"
.globl _Z24__device_stub__calculatePiS_ii # -- Begin function _Z24__device_stub__calculatePiS_ii
.p2align 4, 0x90
.type _Z24__device_stub__calculatePiS_ii,@function
_Z24__device_stub__calculatePiS_ii: # @_Z24__device_stub__calculatePiS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9calculatePiS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z24__device_stub__calculatePiS_ii, .Lfunc_end0-_Z24__device_stub__calculatePiS_ii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9calculatePiS_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9calculatePiS_ii,@object # @_Z9calculatePiS_ii
.section .rodata,"a",@progbits
.globl _Z9calculatePiS_ii
.p2align 3, 0x0
_Z9calculatePiS_ii:
.quad _Z24__device_stub__calculatePiS_ii
.size _Z9calculatePiS_ii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9calculatePiS_ii"
.size .L__unnamed_1, 19
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__calculatePiS_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9calculatePiS_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.