system stringclasses 7 values | input stringlengths 10 94.5k | output stringlengths 14 55.5k |
|---|---|---|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
/* This code will generate a fractal image. Uses OpenCV, to compile:
nvcc CudaFinal.cu `pkg-config --cflags --libs opencv` */
typedef enum color {BLUE, GREEN, RED} Color;
__global__ void convert_to_hsv(unsigned char *src, float *hsv, int width, int heigth, int step, int channels) {
float r, g, b;
float h, s, v;
int ren,col;
ren = blockIdx.x;
col = threadIdx.x;
r = src[(ren * step) + (col * channels) + RED] / 255.0f;
g = src[(ren * step) + (col * channels) + GREEN] / 255.0f;
b = src[(ren * step) + (col * channels) + BLUE] / 255.0f;
float max = fmax(r, fmax(g, b));
float min = fmin(r, fmin(g, b));
float diff = max - min;
v = max;
if(v == 0.0f) { // black
h = s = 0.0f;
} else {
s = diff / v;
if(diff < 0.001f) { // grey
h = 0.0f;
} else { // color
if(max == r) {
h = 60.0f * (g - b)/diff;
if(h < 0.0f) { h += 360.0f; }
} else if(max == g) {
h = 60.0f * (2 + (b - r)/diff);
} else {
h = 60.0f * (4 + (r - g)/diff);
}
}
}
// confusion line
float minh=40.0f;
float maxh=200.0f;
float minis = 0;
float maxs = 100;
float miniv = 0;
float maxv = 100;
// if conditionals to check the color blindness line, if the pixel is in this line i change the color to other color base shifting the h
if (h > minh && h < maxh && s > minis && s < maxs && v > miniv && v < maxv){
hsv[(ren * step) + (col * channels) + RED] = (float) (h + 140.0f);
hsv[(ren * step) + (col * channels) + GREEN] = (float) (s);
hsv[(ren * step) + (col * channels) + BLUE] = (float) (v);
} else { // this keep the pixel if it is out of the color blindnessline
hsv[(ren * step) + (col * channels) + RED] = (float) (h);
hsv[(ren * step) + (col * channels) + GREEN] = (float) (s);
hsv[(ren * step) + (col * channels) + BLUE] = (float) (v);
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
/* This code will generate a fractal image. Uses OpenCV, to compile:
nvcc CudaFinal.cu `pkg-config --cflags --libs opencv` */
typedef enum color {BLUE, GREEN, RED} Color;
__global__ void convert_to_hsv(unsigned char *src, float *hsv, int width, int heigth, int step, int channels) {
float r, g, b;
float h, s, v;
int ren,col;
ren = blockIdx.x;
col = threadIdx.x;
r = src[(ren * step) + (col * channels) + RED] / 255.0f;
g = src[(ren * step) + (col * channels) + GREEN] / 255.0f;
b = src[(ren * step) + (col * channels) + BLUE] / 255.0f;
float max = fmax(r, fmax(g, b));
float min = fmin(r, fmin(g, b));
float diff = max - min;
v = max;
if(v == 0.0f) { // black
h = s = 0.0f;
} else {
s = diff / v;
if(diff < 0.001f) { // grey
h = 0.0f;
} else { // color
if(max == r) {
h = 60.0f * (g - b)/diff;
if(h < 0.0f) { h += 360.0f; }
} else if(max == g) {
h = 60.0f * (2 + (b - r)/diff);
} else {
h = 60.0f * (4 + (r - g)/diff);
}
}
}
// confusion line
float minh=40.0f;
float maxh=200.0f;
float minis = 0;
float maxs = 100;
float miniv = 0;
float maxv = 100;
// if conditionals to check the color blindness line, if the pixel is in this line i change the color to other color base shifting the h
if (h > minh && h < maxh && s > minis && s < maxs && v > miniv && v < maxv){
hsv[(ren * step) + (col * channels) + RED] = (float) (h + 140.0f);
hsv[(ren * step) + (col * channels) + GREEN] = (float) (s);
hsv[(ren * step) + (col * channels) + BLUE] = (float) (v);
} else { // this keep the pixel if it is out of the color blindnessline
hsv[(ren * step) + (col * channels) + RED] = (float) (h);
hsv[(ren * step) + (col * channels) + GREEN] = (float) (s);
hsv[(ren * step) + (col * channels) + BLUE] = (float) (v);
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
/* This code will generate a fractal image. Uses OpenCV, to compile:
nvcc CudaFinal.cu `pkg-config --cflags --libs opencv` */
typedef enum color {BLUE, GREEN, RED} Color;
__global__ void convert_to_hsv(unsigned char *src, float *hsv, int width, int heigth, int step, int channels) {
float r, g, b;
float h, s, v;
int ren,col;
ren = blockIdx.x;
col = threadIdx.x;
r = src[(ren * step) + (col * channels) + RED] / 255.0f;
g = src[(ren * step) + (col * channels) + GREEN] / 255.0f;
b = src[(ren * step) + (col * channels) + BLUE] / 255.0f;
float max = fmax(r, fmax(g, b));
float min = fmin(r, fmin(g, b));
float diff = max - min;
v = max;
if(v == 0.0f) { // black
h = s = 0.0f;
} else {
s = diff / v;
if(diff < 0.001f) { // grey
h = 0.0f;
} else { // color
if(max == r) {
h = 60.0f * (g - b)/diff;
if(h < 0.0f) { h += 360.0f; }
} else if(max == g) {
h = 60.0f * (2 + (b - r)/diff);
} else {
h = 60.0f * (4 + (r - g)/diff);
}
}
}
// confusion line
float minh=40.0f;
float maxh=200.0f;
float minis = 0;
float maxs = 100;
float miniv = 0;
float maxv = 100;
// if conditionals to check the color blindness line, if the pixel is in this line i change the color to other color base shifting the h
if (h > minh && h < maxh && s > minis && s < maxs && v > miniv && v < maxv){
hsv[(ren * step) + (col * channels) + RED] = (float) (h + 140.0f);
hsv[(ren * step) + (col * channels) + GREEN] = (float) (s);
hsv[(ren * step) + (col * channels) + BLUE] = (float) (v);
} else { // this keep the pixel if it is out of the color blindnessline
hsv[(ren * step) + (col * channels) + RED] = (float) (h);
hsv[(ren * step) + (col * channels) + GREEN] = (float) (s);
hsv[(ren * step) + (col * channels) + BLUE] = (float) (v);
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z14convert_to_hsvPhPfiiii
.globl _Z14convert_to_hsvPhPfiiii
.p2align 8
.type _Z14convert_to_hsvPhPfiiii,@function
_Z14convert_to_hsvPhPfiiii:
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x18
s_load_b64 s[4:5], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_mul_i32 s2, s15, s2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, v0, s3, s[2:3]
v_add_nc_u32_e32 v3, 2, v1
v_add_nc_u32_e32 v5, 1, v1
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_ashrrev_i32_e32 v4, 31, v3
v_ashrrev_i32_e32 v6, 31, v5
v_add_co_u32 v7, vcc_lo, s4, v3
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
v_add_co_ci_u32_e32 v8, vcc_lo, s5, v4, vcc_lo
v_add_co_u32 v9, vcc_lo, s4, v5
v_add_co_ci_u32_e32 v10, vcc_lo, s5, v6, vcc_lo
v_add_co_u32 v11, vcc_lo, s4, v1
v_add_co_ci_u32_e32 v12, vcc_lo, s5, v2, vcc_lo
s_clause 0x2
global_load_u8 v0, v[7:8], off
global_load_u8 v7, v[9:10], off
global_load_u8 v8, v[11:12], off
s_waitcnt vmcnt(2)
v_cvt_f32_ubyte0_e32 v0, v0
s_waitcnt vmcnt(1)
v_cvt_f32_ubyte0_e32 v7, v7
s_waitcnt vmcnt(0)
v_cvt_f32_ubyte0_e32 v8, v8
v_div_scale_f32 v9, null, 0x437f0000, 0x437f0000, v0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_div_scale_f32 v10, null, 0x437f0000, 0x437f0000, v7
v_div_scale_f32 v11, null, 0x437f0000, 0x437f0000, v8
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_rcp_f32_e32 v12, v9
v_rcp_f32_e32 v13, v10
v_div_scale_f32 v15, vcc_lo, v0, 0x437f0000, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(TRANS32_DEP_3)
v_rcp_f32_e32 v14, v11
v_div_scale_f32 v16, s2, v7, 0x437f0000, v7
v_div_scale_f32 v20, s3, v8, 0x437f0000, v8
v_fma_f32 v17, -v9, v12, 1.0
s_waitcnt_depctr 0xfff
v_fma_f32 v18, -v10, v13, 1.0
v_fma_f32 v19, -v11, v14, 1.0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_dual_fmac_f32 v12, v17, v12 :: v_dual_fmac_f32 v13, v18, v13
v_fmac_f32_e32 v14, v19, v14
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_mul_f32 v17, v15, v12 :: v_dual_mul_f32 v18, v16, v13
v_fma_f32 v21, -v9, v17, v15
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
v_fma_f32 v22, -v10, v18, v16
v_mul_f32_e32 v19, v20, v14
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_dual_fmac_f32 v17, v21, v12 :: v_dual_fmac_f32 v18, v22, v13
v_fma_f32 v23, -v11, v19, v20
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_fma_f32 v9, -v9, v17, v15
v_fma_f32 v10, -v10, v18, v16
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_fmac_f32_e32 v19, v23, v14
v_div_fmas_f32 v9, v9, v12, v17
s_mov_b32 vcc_lo, s2
s_delay_alu instid0(VALU_DEP_2)
v_fma_f32 v11, -v11, v19, v20
v_div_fmas_f32 v10, v10, v13, v18
s_mov_b32 vcc_lo, s3
v_div_fixup_f32 v12, v9, 0x437f0000, v0
s_mov_b32 s2, exec_lo
v_div_fmas_f32 v13, v11, v14, v19
v_div_fixup_f32 v11, v10, 0x437f0000, v7
v_mov_b32_e32 v7, 0
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_div_fixup_f32 v10, v13, 0x437f0000, v8
v_mov_b32_e32 v8, 0
v_max3_f32 v0, v12, v11, v10
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_neq_f32_e32 0, v0
s_cbranch_execz .LBB0_14
v_min3_f32 v7, v12, v11, v10
s_mov_b32 s3, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_f32_e32 v9, v0, v7
v_div_scale_f32 v7, null, v0, v0, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_f32_e32 v8, v7
s_waitcnt_depctr 0xfff
v_fma_f32 v13, -v7, v8, 1.0
v_fmac_f32_e32 v8, v13, v8
v_div_scale_f32 v14, vcc_lo, v9, v0, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f32_e32 v13, v14, v8
v_fma_f32 v15, -v7, v13, v14
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v13, v15, v8
v_fma_f32 v7, -v7, v13, v14
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_div_fmas_f32 v7, v7, v8, v13
v_mov_b32_e32 v8, 0
v_div_fixup_f32 v7, v7, v0, v9
v_cmpx_ngt_f32_e32 0x3a83126f, v9
s_cbranch_execz .LBB0_13
s_mov_b32 s4, exec_lo
v_cmpx_neq_f32_e32 v0, v12
s_xor_b32 s4, exec_lo, s4
s_cbranch_execz .LBB0_8
s_mov_b32 s5, exec_lo
v_cmpx_neq_f32_e32 v0, v11
s_xor_b32 s5, exec_lo, s5
s_cbranch_execz .LBB0_5
v_sub_f32_e32 v8, v12, v11
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_scale_f32 v10, null, v9, v9, v8
v_rcp_f32_e32 v11, v10
s_waitcnt_depctr 0xfff
v_fma_f32 v12, -v10, v11, 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v11, v12, v11
v_div_scale_f32 v12, vcc_lo, v8, v9, v8
v_mul_f32_e32 v13, v12, v11
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v14, -v10, v13, v12
v_fmac_f32_e32 v13, v14, v11
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v10, -v10, v13, v12
v_div_fmas_f32 v10, v10, v11, v13
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_fixup_f32 v8, v10, v9, v8
v_add_f32_e32 v8, 4.0, v8
s_delay_alu instid0(VALU_DEP_1)
v_mul_f32_e32 v8, 0x42700000, v8
.LBB0_5:
s_and_not1_saveexec_b32 s5, s5
s_cbranch_execz .LBB0_7
v_sub_f32_e32 v8, v10, v12
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_scale_f32 v10, null, v9, v9, v8
v_rcp_f32_e32 v11, v10
s_waitcnt_depctr 0xfff
v_fma_f32 v12, -v10, v11, 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v11, v12, v11
v_div_scale_f32 v12, vcc_lo, v8, v9, v8
v_mul_f32_e32 v13, v12, v11
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v14, -v10, v13, v12
v_fmac_f32_e32 v13, v14, v11
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v10, -v10, v13, v12
v_div_fmas_f32 v10, v10, v11, v13
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_fixup_f32 v8, v10, v9, v8
v_add_f32_e32 v8, 2.0, v8
s_delay_alu instid0(VALU_DEP_1)
v_mul_f32_e32 v8, 0x42700000, v8
.LBB0_7:
s_or_b32 exec_lo, exec_lo, s5
.LBB0_8:
s_and_not1_saveexec_b32 s4, s4
s_cbranch_execz .LBB0_12
v_sub_f32_e32 v8, v11, v10
s_mov_b32 s5, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f32_e32 v8, 0x42700000, v8
v_div_scale_f32 v10, null, v9, v9, v8
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_f32_e32 v11, v10
s_waitcnt_depctr 0xfff
v_fma_f32 v12, -v10, v11, 1.0
v_fmac_f32_e32 v11, v12, v11
v_div_scale_f32 v12, vcc_lo, v8, v9, v8
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f32_e32 v13, v12, v11
v_fma_f32 v14, -v10, v13, v12
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v13, v14, v11
v_fma_f32 v10, -v10, v13, v12
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_fmas_f32 v10, v10, v11, v13
v_div_fixup_f32 v8, v10, v9, v8
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_f32_e32 0, v8
v_add_f32_e32 v8, 0x43b40000, v8
s_or_b32 exec_lo, exec_lo, s5
.LBB0_12:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s4
.LBB0_13:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s3
.LBB0_14:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s2
s_load_b64 s[6:7], s[0:1], 0x8
v_cmp_lt_f32_e32 vcc_lo, 0x42200000, v8
v_cmp_lt_f32_e64 s0, 0, v7
v_cmp_gt_f32_e64 s1, 0x43480000, v8
v_cmp_gt_f32_e64 s2, 0x42c80000, v7
v_cmp_lt_f32_e64 s3, 0, v0
v_cmp_gt_f32_e64 s4, 0x42c80000, v0
s_and_b32 s0, vcc_lo, s0
v_lshlrev_b64 v[3:4], 2, v[3:4]
s_and_b32 s0, s0, s1
v_add_f32_e32 v9, 0x430c0000, v8
s_and_b32 s0, s2, s0
v_lshlrev_b64 v[5:6], 2, v[5:6]
s_and_b32 s0, s3, s0
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_and_b32 vcc_lo, s4, s0
v_cndmask_b32_e32 v8, v8, v9, vcc_lo
s_waitcnt lgkmcnt(0)
v_add_co_u32 v3, vcc_lo, s6, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s7, v4, vcc_lo
v_add_co_u32 v5, vcc_lo, s6, v5
v_add_co_ci_u32_e32 v6, vcc_lo, s7, v6, vcc_lo
v_add_co_u32 v1, vcc_lo, s6, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s7, v2, vcc_lo
s_clause 0x2
global_store_b32 v[3:4], v8, off
global_store_b32 v[5:6], v7, off
global_store_b32 v[1:2], v0, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z14convert_to_hsvPhPfiiii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 32
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 24
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z14convert_to_hsvPhPfiiii, .Lfunc_end0-_Z14convert_to_hsvPhPfiiii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 32
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z14convert_to_hsvPhPfiiii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z14convert_to_hsvPhPfiiii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 24
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
/* This code will generate a fractal image. Uses OpenCV, to compile:
nvcc CudaFinal.cu `pkg-config --cflags --libs opencv` */
typedef enum color {BLUE, GREEN, RED} Color;
__global__ void convert_to_hsv(unsigned char *src, float *hsv, int width, int heigth, int step, int channels) {
float r, g, b;
float h, s, v;
int ren,col;
ren = blockIdx.x;
col = threadIdx.x;
r = src[(ren * step) + (col * channels) + RED] / 255.0f;
g = src[(ren * step) + (col * channels) + GREEN] / 255.0f;
b = src[(ren * step) + (col * channels) + BLUE] / 255.0f;
float max = fmax(r, fmax(g, b));
float min = fmin(r, fmin(g, b));
float diff = max - min;
v = max;
if(v == 0.0f) { // black
h = s = 0.0f;
} else {
s = diff / v;
if(diff < 0.001f) { // grey
h = 0.0f;
} else { // color
if(max == r) {
h = 60.0f * (g - b)/diff;
if(h < 0.0f) { h += 360.0f; }
} else if(max == g) {
h = 60.0f * (2 + (b - r)/diff);
} else {
h = 60.0f * (4 + (r - g)/diff);
}
}
}
// confusion line
float minh=40.0f;
float maxh=200.0f;
float minis = 0;
float maxs = 100;
float miniv = 0;
float maxv = 100;
// if conditionals to check the color blindness line, if the pixel is in this line i change the color to other color base shifting the h
if (h > minh && h < maxh && s > minis && s < maxs && v > miniv && v < maxv){
hsv[(ren * step) + (col * channels) + RED] = (float) (h + 140.0f);
hsv[(ren * step) + (col * channels) + GREEN] = (float) (s);
hsv[(ren * step) + (col * channels) + BLUE] = (float) (v);
} else { // this keep the pixel if it is out of the color blindnessline
hsv[(ren * step) + (col * channels) + RED] = (float) (h);
hsv[(ren * step) + (col * channels) + GREEN] = (float) (s);
hsv[(ren * step) + (col * channels) + BLUE] = (float) (v);
}
} | .text
.file "convert_to_hsv.hip"
.globl _Z29__device_stub__convert_to_hsvPhPfiiii # -- Begin function _Z29__device_stub__convert_to_hsvPhPfiiii
.p2align 4, 0x90
.type _Z29__device_stub__convert_to_hsvPhPfiiii,@function
_Z29__device_stub__convert_to_hsvPhPfiiii: # @_Z29__device_stub__convert_to_hsvPhPfiiii
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
movl %r9d, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z14convert_to_hsvPhPfiiii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $152, %rsp
.cfi_adjust_cfa_offset -152
retq
.Lfunc_end0:
.size _Z29__device_stub__convert_to_hsvPhPfiiii, .Lfunc_end0-_Z29__device_stub__convert_to_hsvPhPfiiii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z14convert_to_hsvPhPfiiii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z14convert_to_hsvPhPfiiii,@object # @_Z14convert_to_hsvPhPfiiii
.section .rodata,"a",@progbits
.globl _Z14convert_to_hsvPhPfiiii
.p2align 3, 0x0
_Z14convert_to_hsvPhPfiiii:
.quad _Z29__device_stub__convert_to_hsvPhPfiiii
.size _Z14convert_to_hsvPhPfiiii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z14convert_to_hsvPhPfiiii"
.size .L__unnamed_1, 27
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z29__device_stub__convert_to_hsvPhPfiiii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z14convert_to_hsvPhPfiiii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0007e8e3_00000000-6_convert_to_hsv.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z40__device_stub__Z14convert_to_hsvPhPfiiiiPhPfiiii
.type _Z40__device_stub__Z14convert_to_hsvPhPfiiiiPhPfiiii, @function
_Z40__device_stub__Z14convert_to_hsvPhPfiiiiPhPfiiii:
.LFB2051:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
movl %r9d, (%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movq %rsp, %rax
movq %rax, 136(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 184
pushq 40(%rsp)
.cfi_def_cfa_offset 192
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z14convert_to_hsvPhPfiiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z40__device_stub__Z14convert_to_hsvPhPfiiiiPhPfiiii, .-_Z40__device_stub__Z14convert_to_hsvPhPfiiiiPhPfiiii
.globl _Z14convert_to_hsvPhPfiiii
.type _Z14convert_to_hsvPhPfiiii, @function
_Z14convert_to_hsvPhPfiiii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z40__device_stub__Z14convert_to_hsvPhPfiiiiPhPfiiii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z14convert_to_hsvPhPfiiii, .-_Z14convert_to_hsvPhPfiiii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z14convert_to_hsvPhPfiiii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z14convert_to_hsvPhPfiiii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "convert_to_hsv.hip"
.globl _Z29__device_stub__convert_to_hsvPhPfiiii # -- Begin function _Z29__device_stub__convert_to_hsvPhPfiiii
.p2align 4, 0x90
.type _Z29__device_stub__convert_to_hsvPhPfiiii,@function
_Z29__device_stub__convert_to_hsvPhPfiiii: # @_Z29__device_stub__convert_to_hsvPhPfiiii
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
movl %r9d, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z14convert_to_hsvPhPfiiii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $152, %rsp
.cfi_adjust_cfa_offset -152
retq
.Lfunc_end0:
.size _Z29__device_stub__convert_to_hsvPhPfiiii, .Lfunc_end0-_Z29__device_stub__convert_to_hsvPhPfiiii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z14convert_to_hsvPhPfiiii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z14convert_to_hsvPhPfiiii,@object # @_Z14convert_to_hsvPhPfiiii
.section .rodata,"a",@progbits
.globl _Z14convert_to_hsvPhPfiiii
.p2align 3, 0x0
_Z14convert_to_hsvPhPfiiii:
.quad _Z29__device_stub__convert_to_hsvPhPfiiii
.size _Z14convert_to_hsvPhPfiiii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z14convert_to_hsvPhPfiiii"
.size .L__unnamed_1, 27
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z29__device_stub__convert_to_hsvPhPfiiii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z14convert_to_hsvPhPfiiii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | extern __device__ void BiAverage(double *A, double *B, int p0, int datasize, int logsize)
{
int stride = datasize;
for (int q = 1; q <= logsize; q++)
{
stride = stride >> 1;
if (p0 < stride)
{
A[p0] += A[p0 + stride];
}
else
{
if (p0 < 2 * stride)
{
B[p0 - stride] += B[p0];
}
}
__syncthreads();
}
} | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | extern __device__ void BiAverage(double *A, double *B, int p0, int datasize, int logsize)
{
int stride = datasize;
for (int q = 1; q <= logsize; q++)
{
stride = stride >> 1;
if (p0 < stride)
{
A[p0] += A[p0 + stride];
}
else
{
if (p0 < 2 * stride)
{
B[p0 - stride] += B[p0];
}
}
__syncthreads();
}
} | .file "tmpxft_0012eb80_00000000-6_solvers_common.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z9BiAveragePdS_iii
.type _Z9BiAveragePdS_iii, @function
_Z9BiAveragePdS_iii:
.LFB2027:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2027:
.size _Z9BiAveragePdS_iii, .-_Z9BiAveragePdS_iii
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | extern __device__ void BiAverage(double *A, double *B, int p0, int datasize, int logsize)
{
int stride = datasize;
for (int q = 1; q <= logsize; q++)
{
stride = stride >> 1;
if (p0 < stride)
{
A[p0] += A[p0 + stride];
}
else
{
if (p0 < 2 * stride)
{
B[p0 - stride] += B[p0];
}
}
__syncthreads();
}
} | #include <hip/hip_runtime.h>
extern __device__ void BiAverage(double *A, double *B, int p0, int datasize, int logsize)
{
int stride = datasize;
for (int q = 1; q <= logsize; q++)
{
stride = stride >> 1;
if (p0 < stride)
{
A[p0] += A[p0 + stride];
}
else
{
if (p0 < 2 * stride)
{
B[p0 - stride] += B[p0];
}
}
__syncthreads();
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
extern __device__ void BiAverage(double *A, double *B, int p0, int datasize, int logsize)
{
int stride = datasize;
for (int q = 1; q <= logsize; q++)
{
stride = stride >> 1;
if (p0 < stride)
{
A[p0] += A[p0 + stride];
}
else
{
if (p0 < 2 * stride)
{
B[p0 - stride] += B[p0];
}
}
__syncthreads();
}
} | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
extern __device__ void BiAverage(double *A, double *B, int p0, int datasize, int logsize)
{
int stride = datasize;
for (int q = 1; q <= logsize; q++)
{
stride = stride >> 1;
if (p0 < stride)
{
A[p0] += A[p0 + stride];
}
else
{
if (p0 < 2 * stride)
{
B[p0 - stride] += B[p0];
}
}
__syncthreads();
}
} | .text
.file "solvers_common.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0012eb80_00000000-6_solvers_common.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z9BiAveragePdS_iii
.type _Z9BiAveragePdS_iii, @function
_Z9BiAveragePdS_iii:
.LFB2027:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2027:
.size _Z9BiAveragePdS_iii, .-_Z9BiAveragePdS_iii
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "solvers_common.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void update_array_two_gpu(int m, int n, int i, int numberOfThreadsRequired, int count, int oldCount, int *d_array )
{
long j=blockIdx.x *blockDim.x + threadIdx.x;
if (j> numberOfThreadsRequired)
{}
else
{
d_Z2 = d_A2 + 1;
if (j < n)
{
d_Z1 = d_A1 + 1;
}
}
} | code for sm_80
Function : _Z20update_array_two_gpuiiiiiiPi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ ULDC UR4, c[0x0][0x16c] ; /* 0x00005b0000047ab9 */
/* 0x000fe40000000800 */
/*0030*/ USHF.R.S32.HI UR4, URZ, 0x1f, UR4 ; /* 0x0000001f3f047899 */
/* 0x000fe20008011404 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0060*/ ISETP.GT.U32.AND P0, PT, R0, c[0x0][0x16c], PT ; /* 0x00005b0000007a0c */
/* 0x000fc80003f04070 */
/*0070*/ ISETP.GT.AND.EX P0, PT, RZ, UR4, PT, P0 ; /* 0x00000004ff007c0c */
/* 0x000fda000bf04300 */
/*0080*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0090*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff067624 */
/* 0x000fe200078e00ff */
/*00a0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc80000000a00 */
/*00b0*/ IADD3 R3, R6, c[0x0][0x174], RZ ; /* 0x00005d0006037a10 */
/* 0x000fc80007ffe0ff */
/*00c0*/ IADD3 R4, P0, R0, R3, RZ ; /* 0x0000000300047210 */
/* 0x000fc80007f1e0ff */
/*00d0*/ LEA.HI.X.SX32 R3, R3, RZ, 0x1, P0 ; /* 0x000000ff03037211 */
/* 0x000fe400000f0eff */
/*00e0*/ LEA R2, P0, R4, c[0x0][0x178], 0x2 ; /* 0x00005e0004027a11 */
/* 0x000fc800078010ff */
/*00f0*/ LEA.HI.X R3, R4, c[0x0][0x17c], R3, 0x2, P0 ; /* 0x00005f0004037a11 */
/* 0x000fca00000f1403 */
/*0100*/ LDG.E R2, [R2.64+0x4] ; /* 0x0000040402027981 */
/* 0x000ea2000c1e1900 */
/*0110*/ IADD3 R5, R6, c[0x0][0x170], RZ ; /* 0x00005c0006057a10 */
/* 0x000fe40007ffe0ff */
/*0120*/ ISETP.GE.U32.AND P0, PT, R0.reuse, c[0x0][0x164], PT ; /* 0x0000590000007a0c */
/* 0x040fe40003f06070 */
/*0130*/ SHF.R.S32.HI R6, RZ, 0x1f, R6 ; /* 0x0000001fff067819 */
/* 0x000fe40000011406 */
/*0140*/ IADD3 R7, P1, R0, R5, RZ ; /* 0x0000000500077210 */
/* 0x000fe40007f3e0ff */
/*0150*/ ISETP.GE.AND.EX P0, PT, RZ, R6, PT, P0 ; /* 0x00000006ff00720c */
/* 0x000fe40003f06300 */
/*0160*/ LEA.HI.X.SX32 R8, R5, RZ, 0x1, P1 ; /* 0x000000ff05087211 */
/* 0x000fc400008f0eff */
/*0170*/ LEA R4, P1, R7, c[0x0][0x178], 0x2 ; /* 0x00005e0007047a11 */
/* 0x000fc800078210ff */
/*0180*/ LEA.HI.X R5, R7, c[0x0][0x17c], R8, 0x2, P1 ; /* 0x00005f0007057a11 */
/* 0x000fe400008f1408 */
/*0190*/ IADD3 R7, R2, 0x1, RZ ; /* 0x0000000102077810 */
/* 0x004fca0007ffe0ff */
/*01a0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x0001e2000c101904 */
/*01b0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*01c0*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x174] ; /* 0x00005d00ff047624 */
/* 0x001fe200078e00ff */
/*01d0*/ IADD3 R3, P0, R0, c[0x0][0x174], RZ ; /* 0x00005d0000037a10 */
/* 0x000fc80007f1e0ff */
/*01e0*/ LEA.HI.X.SX32 R4, R4, RZ, 0x1, P0 ; /* 0x000000ff04047211 */
/* 0x000fe400000f0eff */
/*01f0*/ LEA R2, P0, R3, c[0x0][0x178], 0x2 ; /* 0x00005e0003027a11 */
/* 0x000fc800078010ff */
/*0200*/ LEA.HI.X R3, R3, c[0x0][0x17c], R4, 0x2, P0 ; /* 0x00005f0003037a11 */
/* 0x000fca00000f1404 */
/*0210*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1900 */
/*0220*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff057624 */
/* 0x000fe200078e00ff */
/*0230*/ IADD3 R0, P0, R0, c[0x0][0x170], RZ ; /* 0x00005c0000007a10 */
/* 0x000fc80007f1e0ff */
/*0240*/ LEA.HI.X.SX32 R5, R5, RZ, 0x1, P0 ; /* 0x000000ff05057211 */
/* 0x000fe400000f0eff */
/*0250*/ LEA R4, P0, R0, c[0x0][0x178], 0x2 ; /* 0x00005e0000047a11 */
/* 0x000fc800078010ff */
/*0260*/ LEA.HI.X R5, R0, c[0x0][0x17c], R5, 0x2, P0 ; /* 0x00005f0000057a11 */
/* 0x000fe400000f1405 */
/*0270*/ IADD3 R7, R2, 0x1, RZ ; /* 0x0000000102077810 */
/* 0x004fca0007ffe0ff */
/*0280*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*0290*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*02a0*/ BRA 0x2a0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0300*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0310*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0320*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0330*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0340*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0350*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0360*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0370*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void update_array_two_gpu(int m, int n, int i, int numberOfThreadsRequired, int count, int oldCount, int *d_array )
{
long j=blockIdx.x *blockDim.x + threadIdx.x;
if (j> numberOfThreadsRequired)
{}
else
{
d_Z2 = d_A2 + 1;
if (j < n)
{
d_Z1 = d_A1 + 1;
}
}
} | .file "tmpxft_00178782_00000000-6_update_array_two_gpu.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z46__device_stub__Z20update_array_two_gpuiiiiiiPiiiiiiiPi
.type _Z46__device_stub__Z20update_array_two_gpuiiiiiiPiiiiiiiPi, @function
_Z46__device_stub__Z20update_array_two_gpuiiiiiiPiiiiiiiPi:
.LFB2051:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movl %edx, 20(%rsp)
movl %ecx, 16(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
movq 176(%rsp), %rax
movq %rax, (%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 20(%rsp), %rax
movq %rax, 112(%rsp)
leaq 16(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
movq %rsp, %rax
movq %rax, 144(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 184
pushq 40(%rsp)
.cfi_def_cfa_offset 192
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z20update_array_two_gpuiiiiiiPi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z46__device_stub__Z20update_array_two_gpuiiiiiiPiiiiiiiPi, .-_Z46__device_stub__Z20update_array_two_gpuiiiiiiPiiiiiiiPi
.globl _Z20update_array_two_gpuiiiiiiPi
.type _Z20update_array_two_gpuiiiiiiPi, @function
_Z20update_array_two_gpuiiiiiiPi:
.LFB2052:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
pushq 24(%rsp)
.cfi_def_cfa_offset 32
call _Z46__device_stub__Z20update_array_two_gpuiiiiiiPiiiiiiiPi
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z20update_array_two_gpuiiiiiiPi, .-_Z20update_array_two_gpuiiiiiiPi
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z20update_array_two_gpuiiiiiiPi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z20update_array_two_gpuiiiiiiPi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void update_array_two_gpu(int m, int n, int i, int numberOfThreadsRequired, int count, int oldCount, int *d_array )
{
long j=blockIdx.x *blockDim.x + threadIdx.x;
if (j> numberOfThreadsRequired)
{}
else
{
d_Z2 = d_A2 + 1;
if (j < n)
{
d_Z1 = d_A1 + 1;
}
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void update_array_two_gpu(int m, int n, int i, int numberOfThreadsRequired, int count, int oldCount, int *d_array )
{
long j=blockIdx.x *blockDim.x + threadIdx.x;
if (j> numberOfThreadsRequired)
{}
else
{
d_Z2 = d_A2 + 1;
if (j < n)
{
d_Z1 = d_A1 + 1;
}
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void update_array_two_gpu(int m, int n, int i, int numberOfThreadsRequired, int count, int oldCount, int *d_array )
{
long j=blockIdx.x *blockDim.x + threadIdx.x;
if (j> numberOfThreadsRequired)
{}
else
{
d_Z2 = d_A2 + 1;
if (j < n)
{
d_Z1 = d_A1 + 1;
}
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z20update_array_two_gpuiiiiiiPi
.globl _Z20update_array_two_gpuiiiiiiPi
.p2align 8
.type _Z20update_array_two_gpuiiiiiiPi,@function
_Z20update_array_two_gpuiiiiiiPi:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x2c
s_load_b32 s2, s[0:1], 0xc
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(SALU_CYCLE_1)
v_mad_u64_u32 v[2:3], null, s15, s3, v[0:1]
v_mov_b32_e32 v3, 0
s_ashr_i32 s3, s2, 31
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_cmp_ge_i64_e32 vcc_lo, s[2:3], v[2:3]
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB0_3
s_clause 0x1
s_load_b32 s4, s[0:1], 0x4
s_load_b128 s[0:3], s[0:1], 0x10
v_lshlrev_b64 v[0:1], 2, v[2:3]
s_waitcnt lgkmcnt(0)
s_ashr_i32 s5, s4, 31
s_add_i32 s6, s1, s4
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_ashr_i32 s7, s6, 31
s_lshl_b64 s[6:7], s[6:7], 2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
s_add_u32 s6, s2, s6
s_addc_u32 s7, s3, s7
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
s_add_i32 s6, s0, s4
s_ashr_i32 s7, s6, 31
global_load_b32 v6, v[4:5], off offset:4
s_lshl_b64 s[6:7], s[6:7], 2
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s6, s2, s6
s_addc_u32 s7, s3, s7
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_cmp_gt_i64_e32 vcc_lo, s[4:5], v[2:3]
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v6, 1, v6
global_store_b32 v[4:5], v6, off
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_3
s_ashr_i32 s5, s1, 31
s_mov_b32 s4, s1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_lshl_b64 s[4:5], s[4:5], 2
s_add_u32 s1, s2, s4
s_addc_u32 s4, s3, s5
v_add_co_u32 v2, vcc_lo, s1, v0
v_add_co_ci_u32_e32 v3, vcc_lo, s4, v1, vcc_lo
s_ashr_i32 s1, s0, 31
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[0:1], s[0:1], 2
global_load_b32 v2, v[2:3], off
s_add_u32 s0, s2, s0
s_addc_u32 s1, s3, s1
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, 1, v2
global_store_b32 v[0:1], v2, off
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z20update_array_two_gpuiiiiiiPi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z20update_array_two_gpuiiiiiiPi, .Lfunc_end0-_Z20update_array_two_gpuiiiiiiPi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z20update_array_two_gpuiiiiiiPi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z20update_array_two_gpuiiiiiiPi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
// Kernel: one thread per element; each in-range thread bumps entries of
// d_array via the d_Z*/d_A* accessors (see NOTE below).
//
// Parameters (as visible here):
//   m, i                     - not referenced in this body; presumably consumed
//                              inside the d_Z*/d_A* macros — TODO confirm.
//   n                        - secondary bound: the d_Z1/d_A1 update only runs
//                              for threads with j < n.
//   numberOfThreadsRequired  - upper bound on useful threads; note the guard
//                              uses '>' (not '>='), so thread
//                              j == numberOfThreadsRequired still does work —
//                              NOTE(review): verify this off-by-one is intended.
//   count, oldCount          - not referenced in this body; the generated ISA
//                              for this kernel indexes d_array with them, so
//                              they are presumably used by the macros below.
//   d_array                  - device buffer being updated.
//
// NOTE(review): d_Z2, d_A2, d_Z1, d_A1 are not declared in this translation
// unit — they appear to be macros from "includes.h" expanding to
// d_array[...] accesses (the compiled code loads an element, adds 1, and
// stores it). Confirm against includes.h before modifying this kernel.
__global__ void update_array_two_gpu(int m, int n, int i, int numberOfThreadsRequired, int count, int oldCount, int *d_array )
{
// Global thread index (flattened 1-D launch).
long j=blockIdx.x *blockDim.x + threadIdx.x;
if (j> numberOfThreadsRequired)
{}  // out-of-range thread: no work
else
{
// Unconditional update for every in-range thread.
d_Z2 = d_A2 + 1;
if (j < n)
{
// Additional update restricted to the first n threads.
d_Z1 = d_A1 + 1;
}
}
}
.file "update_array_two_gpu.hip"
.globl _Z35__device_stub__update_array_two_gpuiiiiiiPi # -- Begin function _Z35__device_stub__update_array_two_gpuiiiiiiPi
.p2align 4, 0x90
.type _Z35__device_stub__update_array_two_gpuiiiiiiPi,@function
_Z35__device_stub__update_array_two_gpuiiiiiiPi: # @_Z35__device_stub__update_array_two_gpuiiiiiiPi
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movl %edx, 20(%rsp)
movl %ecx, 16(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
leaq 28(%rsp), %rax
movq %rax, 80(%rsp)
leaq 24(%rsp), %rax
movq %rax, 88(%rsp)
leaq 20(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 144(%rsp), %rax
movq %rax, 128(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z20update_array_two_gpuiiiiiiPi, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $152, %rsp
.cfi_adjust_cfa_offset -152
retq
.Lfunc_end0:
.size _Z35__device_stub__update_array_two_gpuiiiiiiPi, .Lfunc_end0-_Z35__device_stub__update_array_two_gpuiiiiiiPi
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z20update_array_two_gpuiiiiiiPi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z20update_array_two_gpuiiiiiiPi,@object # @_Z20update_array_two_gpuiiiiiiPi
.section .rodata,"a",@progbits
.globl _Z20update_array_two_gpuiiiiiiPi
.p2align 3, 0x0
_Z20update_array_two_gpuiiiiiiPi:
.quad _Z35__device_stub__update_array_two_gpuiiiiiiPi
.size _Z20update_array_two_gpuiiiiiiPi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z20update_array_two_gpuiiiiiiPi"
.size .L__unnamed_1, 33
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z35__device_stub__update_array_two_gpuiiiiiiPi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z20update_array_two_gpuiiiiiiPi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z20update_array_two_gpuiiiiiiPi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ ULDC UR4, c[0x0][0x16c] ; /* 0x00005b0000047ab9 */
/* 0x000fe40000000800 */
/*0030*/ USHF.R.S32.HI UR4, URZ, 0x1f, UR4 ; /* 0x0000001f3f047899 */
/* 0x000fe20008011404 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0060*/ ISETP.GT.U32.AND P0, PT, R0, c[0x0][0x16c], PT ; /* 0x00005b0000007a0c */
/* 0x000fc80003f04070 */
/*0070*/ ISETP.GT.AND.EX P0, PT, RZ, UR4, PT, P0 ; /* 0x00000004ff007c0c */
/* 0x000fda000bf04300 */
/*0080*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0090*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff067624 */
/* 0x000fe200078e00ff */
/*00a0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc80000000a00 */
/*00b0*/ IADD3 R3, R6, c[0x0][0x174], RZ ; /* 0x00005d0006037a10 */
/* 0x000fc80007ffe0ff */
/*00c0*/ IADD3 R4, P0, R0, R3, RZ ; /* 0x0000000300047210 */
/* 0x000fc80007f1e0ff */
/*00d0*/ LEA.HI.X.SX32 R3, R3, RZ, 0x1, P0 ; /* 0x000000ff03037211 */
/* 0x000fe400000f0eff */
/*00e0*/ LEA R2, P0, R4, c[0x0][0x178], 0x2 ; /* 0x00005e0004027a11 */
/* 0x000fc800078010ff */
/*00f0*/ LEA.HI.X R3, R4, c[0x0][0x17c], R3, 0x2, P0 ; /* 0x00005f0004037a11 */
/* 0x000fca00000f1403 */
/*0100*/ LDG.E R2, [R2.64+0x4] ; /* 0x0000040402027981 */
/* 0x000ea2000c1e1900 */
/*0110*/ IADD3 R5, R6, c[0x0][0x170], RZ ; /* 0x00005c0006057a10 */
/* 0x000fe40007ffe0ff */
/*0120*/ ISETP.GE.U32.AND P0, PT, R0.reuse, c[0x0][0x164], PT ; /* 0x0000590000007a0c */
/* 0x040fe40003f06070 */
/*0130*/ SHF.R.S32.HI R6, RZ, 0x1f, R6 ; /* 0x0000001fff067819 */
/* 0x000fe40000011406 */
/*0140*/ IADD3 R7, P1, R0, R5, RZ ; /* 0x0000000500077210 */
/* 0x000fe40007f3e0ff */
/*0150*/ ISETP.GE.AND.EX P0, PT, RZ, R6, PT, P0 ; /* 0x00000006ff00720c */
/* 0x000fe40003f06300 */
/*0160*/ LEA.HI.X.SX32 R8, R5, RZ, 0x1, P1 ; /* 0x000000ff05087211 */
/* 0x000fc400008f0eff */
/*0170*/ LEA R4, P1, R7, c[0x0][0x178], 0x2 ; /* 0x00005e0007047a11 */
/* 0x000fc800078210ff */
/*0180*/ LEA.HI.X R5, R7, c[0x0][0x17c], R8, 0x2, P1 ; /* 0x00005f0007057a11 */
/* 0x000fe400008f1408 */
/*0190*/ IADD3 R7, R2, 0x1, RZ ; /* 0x0000000102077810 */
/* 0x004fca0007ffe0ff */
/*01a0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x0001e2000c101904 */
/*01b0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*01c0*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x174] ; /* 0x00005d00ff047624 */
/* 0x001fe200078e00ff */
/*01d0*/ IADD3 R3, P0, R0, c[0x0][0x174], RZ ; /* 0x00005d0000037a10 */
/* 0x000fc80007f1e0ff */
/*01e0*/ LEA.HI.X.SX32 R4, R4, RZ, 0x1, P0 ; /* 0x000000ff04047211 */
/* 0x000fe400000f0eff */
/*01f0*/ LEA R2, P0, R3, c[0x0][0x178], 0x2 ; /* 0x00005e0003027a11 */
/* 0x000fc800078010ff */
/*0200*/ LEA.HI.X R3, R3, c[0x0][0x17c], R4, 0x2, P0 ; /* 0x00005f0003037a11 */
/* 0x000fca00000f1404 */
/*0210*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1900 */
/*0220*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff057624 */
/* 0x000fe200078e00ff */
/*0230*/ IADD3 R0, P0, R0, c[0x0][0x170], RZ ; /* 0x00005c0000007a10 */
/* 0x000fc80007f1e0ff */
/*0240*/ LEA.HI.X.SX32 R5, R5, RZ, 0x1, P0 ; /* 0x000000ff05057211 */
/* 0x000fe400000f0eff */
/*0250*/ LEA R4, P0, R0, c[0x0][0x178], 0x2 ; /* 0x00005e0000047a11 */
/* 0x000fc800078010ff */
/*0260*/ LEA.HI.X R5, R0, c[0x0][0x17c], R5, 0x2, P0 ; /* 0x00005f0000057a11 */
/* 0x000fe400000f1405 */
/*0270*/ IADD3 R7, R2, 0x1, RZ ; /* 0x0000000102077810 */
/* 0x004fca0007ffe0ff */
/*0280*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*0290*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*02a0*/ BRA 0x2a0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0300*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0310*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0320*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0330*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0340*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0350*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0360*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0370*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z20update_array_two_gpuiiiiiiPi
.globl _Z20update_array_two_gpuiiiiiiPi
.p2align 8
.type _Z20update_array_two_gpuiiiiiiPi,@function
_Z20update_array_two_gpuiiiiiiPi:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x2c
s_load_b32 s2, s[0:1], 0xc
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(SALU_CYCLE_1)
v_mad_u64_u32 v[2:3], null, s15, s3, v[0:1]
v_mov_b32_e32 v3, 0
s_ashr_i32 s3, s2, 31
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_cmp_ge_i64_e32 vcc_lo, s[2:3], v[2:3]
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB0_3
s_clause 0x1
s_load_b32 s4, s[0:1], 0x4
s_load_b128 s[0:3], s[0:1], 0x10
v_lshlrev_b64 v[0:1], 2, v[2:3]
s_waitcnt lgkmcnt(0)
s_ashr_i32 s5, s4, 31
s_add_i32 s6, s1, s4
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_ashr_i32 s7, s6, 31
s_lshl_b64 s[6:7], s[6:7], 2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
s_add_u32 s6, s2, s6
s_addc_u32 s7, s3, s7
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
s_add_i32 s6, s0, s4
s_ashr_i32 s7, s6, 31
global_load_b32 v6, v[4:5], off offset:4
s_lshl_b64 s[6:7], s[6:7], 2
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s6, s2, s6
s_addc_u32 s7, s3, s7
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_cmp_gt_i64_e32 vcc_lo, s[4:5], v[2:3]
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v6, 1, v6
global_store_b32 v[4:5], v6, off
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_3
s_ashr_i32 s5, s1, 31
s_mov_b32 s4, s1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_lshl_b64 s[4:5], s[4:5], 2
s_add_u32 s1, s2, s4
s_addc_u32 s4, s3, s5
v_add_co_u32 v2, vcc_lo, s1, v0
v_add_co_ci_u32_e32 v3, vcc_lo, s4, v1, vcc_lo
s_ashr_i32 s1, s0, 31
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[0:1], s[0:1], 2
global_load_b32 v2, v[2:3], off
s_add_u32 s0, s2, s0
s_addc_u32 s1, s3, s1
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, 1, v2
global_store_b32 v[0:1], v2, off
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z20update_array_two_gpuiiiiiiPi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z20update_array_two_gpuiiiiiiPi, .Lfunc_end0-_Z20update_array_two_gpuiiiiiiPi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z20update_array_two_gpuiiiiiiPi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z20update_array_two_gpuiiiiiiPi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00178782_00000000-6_update_array_two_gpu.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z46__device_stub__Z20update_array_two_gpuiiiiiiPiiiiiiiPi
.type _Z46__device_stub__Z20update_array_two_gpuiiiiiiPiiiiiiiPi, @function
_Z46__device_stub__Z20update_array_two_gpuiiiiiiPiiiiiiiPi:
.LFB2051:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movl %edx, 20(%rsp)
movl %ecx, 16(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
movq 176(%rsp), %rax
movq %rax, (%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 20(%rsp), %rax
movq %rax, 112(%rsp)
leaq 16(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
movq %rsp, %rax
movq %rax, 144(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 184
pushq 40(%rsp)
.cfi_def_cfa_offset 192
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z20update_array_two_gpuiiiiiiPi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z46__device_stub__Z20update_array_two_gpuiiiiiiPiiiiiiiPi, .-_Z46__device_stub__Z20update_array_two_gpuiiiiiiPiiiiiiiPi
.globl _Z20update_array_two_gpuiiiiiiPi
.type _Z20update_array_two_gpuiiiiiiPi, @function
_Z20update_array_two_gpuiiiiiiPi:
.LFB2052:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
pushq 24(%rsp)
.cfi_def_cfa_offset 32
call _Z46__device_stub__Z20update_array_two_gpuiiiiiiPiiiiiiiPi
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z20update_array_two_gpuiiiiiiPi, .-_Z20update_array_two_gpuiiiiiiPi
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z20update_array_two_gpuiiiiiiPi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z20update_array_two_gpuiiiiiiPi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "update_array_two_gpu.hip"
.globl _Z35__device_stub__update_array_two_gpuiiiiiiPi # -- Begin function _Z35__device_stub__update_array_two_gpuiiiiiiPi
.p2align 4, 0x90
.type _Z35__device_stub__update_array_two_gpuiiiiiiPi,@function
_Z35__device_stub__update_array_two_gpuiiiiiiPi: # @_Z35__device_stub__update_array_two_gpuiiiiiiPi
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movl %edx, 20(%rsp)
movl %ecx, 16(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
leaq 28(%rsp), %rax
movq %rax, 80(%rsp)
leaq 24(%rsp), %rax
movq %rax, 88(%rsp)
leaq 20(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 144(%rsp), %rax
movq %rax, 128(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z20update_array_two_gpuiiiiiiPi, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $152, %rsp
.cfi_adjust_cfa_offset -152
retq
.Lfunc_end0:
.size _Z35__device_stub__update_array_two_gpuiiiiiiPi, .Lfunc_end0-_Z35__device_stub__update_array_two_gpuiiiiiiPi
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z20update_array_two_gpuiiiiiiPi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z20update_array_two_gpuiiiiiiPi,@object # @_Z20update_array_two_gpuiiiiiiPi
.section .rodata,"a",@progbits
.globl _Z20update_array_two_gpuiiiiiiPi
.p2align 3, 0x0
_Z20update_array_two_gpuiiiiiiPi:
.quad _Z35__device_stub__update_array_two_gpuiiiiiiPi
.size _Z20update_array_two_gpuiiiiiiPi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z20update_array_two_gpuiiiiiiPi"
.size .L__unnamed_1, 33
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z35__device_stub__update_array_two_gpuiiiiiiPi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z20update_array_two_gpuiiiiiiPi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
/*This file is part of quantumsim. (https://github.com/brianzi/quantumsim)*/
/*(c) 2016 Brian Tarasinski*/
/*Distributed under the GNU GPLv3. See LICENSE.txt or https://www.gnu.org/licenses/gpl.txt*/
//kernel to transform to pauli basis (up, x, y, down)
//to be run on a complete complex density matrix, once for each bit
//this operation is its own inverse (can also be used in opposite direction)
// Extract the diagonal of a density matrix stored in the Pauli basis
// (up, x, y, down): each qubit contributes two address bits, and the
// diagonal entries are those where every qubit's bit pair is 00 ("up")
// or 11 ("down").  out[x] receives the diagonal entry for computational
// basis state x.
//
// Launch: 1-D grid covering at least 2^no_qubits threads; each thread
// handles one x.  Requires no_qubits <= 16 (only the low 16 bits of x
// are spread).
__global__ void get_diag(double *dm9, double *out, unsigned int no_qubits) {
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    if (x >= (1 << no_qubits)) return;

    // Duplicate each of the low 16 bits of x into an adjacent pair:
    // bit i of x lands at bits 2i and 2i+1 of the source address.
    unsigned int addr_real = 0;
    for (int bit = 0; bit < 16; ++bit) {
        const unsigned int picked = x & (1U << bit);
        addr_real |= (picked << bit) | (picked << (bit + 1));
    }

    out[x] = dm9[addr_real];
}
Function : _Z8get_diagPdS_j
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ UMOV UR4, 0x1 ; /* 0x0000000100047882 */
/* 0x000fe40000000000 */
/*0030*/ ULDC UR5, c[0x0][0x170] ; /* 0x00005c0000057ab9 */
/* 0x000fe20000000800 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e220000002100 */
/*0050*/ USHF.L.U32 UR4, UR4, UR5, URZ ; /* 0x0000000504047299 */
/* 0x000fe2000800063f */
/*0060*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0070*/ ISETP.GE.AND P0, PT, R0, UR4, PT ; /* 0x0000000400007c0c */
/* 0x000fda000bf06270 */
/*0080*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0090*/ LOP3.LUT R2, R0.reuse, 0x1, RZ, 0xc0, !PT ; /* 0x0000000100027812 */
/* 0x040fe200078ec0ff */
/*00a0*/ IMAD.SHL.U32 R5, R0.reuse, 0x4, RZ ; /* 0x0000000400057824 */
/* 0x040fe200078e00ff */
/*00b0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*00c0*/ IMAD.SHL.U32 R7, R0, 0x8, RZ ; /* 0x0000000800077824 */
/* 0x000fe400078e00ff */
/*00d0*/ IMAD.SHL.U32 R3, R2, 0x2, RZ ; /* 0x0000000202037824 */
/* 0x000fca00078e00ff */
/*00e0*/ LOP3.LUT R2, R3, 0x2, R2, 0xe2, !PT ; /* 0x0000000203027812 */
/* 0x000fe200078ee202 */
/*00f0*/ IMAD.SHL.U32 R3, R0, 0x2, RZ ; /* 0x0000000200037824 */
/* 0x000fc600078e00ff */
/*0100*/ LOP3.LUT R2, R2, 0x8, R5, 0xf8, !PT ; /* 0x0000000802027812 */
/* 0x000fc800078ef805 */
/*0110*/ LOP3.LUT R2, R2, 0x4, R3, 0xf8, !PT ; /* 0x0000000402027812 */
/* 0x000fe200078ef803 */
/*0120*/ IMAD.SHL.U32 R3, R0, 0x10, RZ ; /* 0x0000001000037824 */
/* 0x000fc600078e00ff */
/*0130*/ LOP3.LUT R2, R2, 0x20, R7, 0xf8, !PT ; /* 0x0000002002027812 */
/* 0x000fc800078ef807 */
/*0140*/ LOP3.LUT R2, R2, 0x10, R5, 0xf8, !PT ; /* 0x0000001002027812 */
/* 0x000fe200078ef805 */
/*0150*/ IMAD.SHL.U32 R5, R0, 0x20, RZ ; /* 0x0000002000057824 */
/* 0x000fc600078e00ff */
/*0160*/ LOP3.LUT R2, R2, 0x80, R3, 0xf8, !PT ; /* 0x0000008002027812 */
/* 0x000fc800078ef803 */
/*0170*/ LOP3.LUT R2, R2, 0x40, R7, 0xf8, !PT ; /* 0x0000004002027812 */
/* 0x000fe200078ef807 */
/*0180*/ IMAD.SHL.U32 R7, R0, 0x40, RZ ; /* 0x0000004000077824 */
/* 0x000fc600078e00ff */
/*0190*/ LOP3.LUT R2, R2, 0x200, R5, 0xf8, !PT ; /* 0x0000020002027812 */
/* 0x000fc800078ef805 */
/*01a0*/ LOP3.LUT R2, R2, 0x100, R3, 0xf8, !PT ; /* 0x0000010002027812 */
/* 0x000fe200078ef803 */
/*01b0*/ IMAD.SHL.U32 R3, R0, 0x80, RZ ; /* 0x0000008000037824 */
/* 0x000fc600078e00ff */
/*01c0*/ LOP3.LUT R2, R2, 0x800, R7, 0xf8, !PT ; /* 0x0000080002027812 */
/* 0x000fc800078ef807 */
/*01d0*/ LOP3.LUT R2, R2, 0x400, R5, 0xf8, !PT ; /* 0x0000040002027812 */
/* 0x000fe200078ef805 */
/*01e0*/ IMAD.SHL.U32 R5, R0, 0x100, RZ ; /* 0x0000010000057824 */
/* 0x000fc600078e00ff */
/*01f0*/ LOP3.LUT R2, R2, 0x2000, R3, 0xf8, !PT ; /* 0x0000200002027812 */
/* 0x000fc800078ef803 */
/*0200*/ LOP3.LUT R2, R2, 0x1000, R7, 0xf8, !PT ; /* 0x0000100002027812 */
/* 0x000fe200078ef807 */
/*0210*/ IMAD.SHL.U32 R7, R0, 0x200, RZ ; /* 0x0000020000077824 */
/* 0x000fc600078e00ff */
/*0220*/ LOP3.LUT R2, R2, 0x8000, R5, 0xf8, !PT ; /* 0x0000800002027812 */
/* 0x000fc800078ef805 */
/*0230*/ LOP3.LUT R2, R2, 0x4000, R3, 0xf8, !PT ; /* 0x0000400002027812 */
/* 0x000fe200078ef803 */
/*0240*/ IMAD.SHL.U32 R3, R0, 0x400, RZ ; /* 0x0000040000037824 */
/* 0x000fc600078e00ff */
/*0250*/ LOP3.LUT R2, R2, 0x20000, R7, 0xf8, !PT ; /* 0x0002000002027812 */
/* 0x000fc800078ef807 */
/*0260*/ LOP3.LUT R2, R2, 0x10000, R5, 0xf8, !PT ; /* 0x0001000002027812 */
/* 0x000fe200078ef805 */
/*0270*/ IMAD.SHL.U32 R5, R0, 0x800, RZ ; /* 0x0000080000057824 */
/* 0x000fc600078e00ff */
/*0280*/ LOP3.LUT R2, R2, 0x80000, R3, 0xf8, !PT ; /* 0x0008000002027812 */
/* 0x000fc800078ef803 */
/*0290*/ LOP3.LUT R2, R2, 0x40000, R7, 0xf8, !PT ; /* 0x0004000002027812 */
/* 0x000fe200078ef807 */
/*02a0*/ IMAD.SHL.U32 R7, R0, 0x1000, RZ ; /* 0x0000100000077824 */
/* 0x000fc600078e00ff */
/*02b0*/ LOP3.LUT R2, R2, 0x200000, R5, 0xf8, !PT ; /* 0x0020000002027812 */
/* 0x000fc800078ef805 */
/*02c0*/ LOP3.LUT R2, R2, 0x100000, R3, 0xf8, !PT ; /* 0x0010000002027812 */
/* 0x000fe200078ef803 */
/*02d0*/ IMAD.SHL.U32 R3, R0, 0x2000, RZ ; /* 0x0000200000037824 */
/* 0x000fc600078e00ff */
/*02e0*/ LOP3.LUT R2, R2, 0x800000, R7, 0xf8, !PT ; /* 0x0080000002027812 */
/* 0x000fc800078ef807 */
/*02f0*/ LOP3.LUT R2, R2, 0x400000, R5, 0xf8, !PT ; /* 0x0040000002027812 */
/* 0x000fe200078ef805 */
/*0300*/ IMAD.SHL.U32 R5, R0, 0x4000, RZ ; /* 0x0000400000057824 */
/* 0x000fc600078e00ff */
/*0310*/ LOP3.LUT R2, R2, 0x2000000, R3, 0xf8, !PT ; /* 0x0200000002027812 */
/* 0x000fc800078ef803 */
/*0320*/ LOP3.LUT R2, R2, 0x1000000, R7, 0xf8, !PT ; /* 0x0100000002027812 */
/* 0x000fe200078ef807 */
/*0330*/ IMAD.SHL.U32 R7, R0, 0x8000, RZ ; /* 0x0000800000077824 */
/* 0x000fc600078e00ff */
/*0340*/ LOP3.LUT R2, R2, 0x8000000, R5, 0xf8, !PT ; /* 0x0800000002027812 */
/* 0x000fc800078ef805 */
/*0350*/ LOP3.LUT R2, R2, 0x4000000, R3, 0xf8, !PT ; /* 0x0400000002027812 */
/* 0x000fe200078ef803 */
/*0360*/ IMAD.U32 R3, R0, 0x10000, RZ ; /* 0x0001000000037824 */
/* 0x000fc600078e00ff */
/*0370*/ LOP3.LUT R2, R2, 0x20000000, R7, 0xf8, !PT ; /* 0x2000000002027812 */
/* 0x000fc800078ef807 */
/*0380*/ LOP3.LUT R2, R2, 0x10000000, R5, 0xf8, !PT ; /* 0x1000000002027812 */
/* 0x000fe200078ef805 */
/*0390*/ IMAD.MOV.U32 R5, RZ, RZ, 0x8 ; /* 0x00000008ff057424 */
/* 0x000fc600078e00ff */
/*03a0*/ LOP3.LUT R2, R2, 0x80000000, R3, 0xf8, !PT ; /* 0x8000000002027812 */
/* 0x000fc800078ef803 */
/*03b0*/ LOP3.LUT R2, R2, 0x40000000, R7, 0xf8, !PT ; /* 0x4000000002027812 */
/* 0x000fca00078ef807 */
/*03c0*/ IMAD.WIDE.U32 R2, R2, R5, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fcc00078e0005 */
/*03d0*/ LDG.E.64 R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1b00 */
/*03e0*/ IMAD.WIDE R4, R0, R5, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fca00078e0205 */
/*03f0*/ STG.E.64 [R4.64], R2 ; /* 0x0000000204007986 */
/* 0x004fe2000c101b04 */
/*0400*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0410*/ BRA 0x410; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0420*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0430*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0440*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0450*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0460*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0470*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0480*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0490*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
/*This file is part of quantumsim. (https://github.com/brianzi/quantumsim)*/
/*(c) 2016 Brian Tarasinski*/
/*Distributed under the GNU GPLv3. See LICENSE.txt or https://www.gnu.org/licenses/gpl.txt*/
//kernel to transform to pauli basis (up, x, y, down)
//to be run on a complete complex density matrix, once for each bit
//this operation is its own inverse (can also be used in opposite direction)
__global__ void get_diag(double *dm9, double *out, unsigned int no_qubits) {
int x = (blockIdx.x *blockDim.x) + threadIdx.x;
if (x >= (1 << no_qubits)) return;
unsigned int addr_real = 0;
for (int i = 0; i < 16; i++) {
addr_real |= (x & 1U << i) << i | (x & 1U << i) << (i + 1);
}
out[x] = dm9[addr_real];
} | .file "tmpxft_000f05a9_00000000-6_get_diag.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z30__device_stub__Z8get_diagPdS_jPdS_j
.type _Z30__device_stub__Z8get_diagPdS_jPdS_j, @function
_Z30__device_stub__Z8get_diagPdS_jPdS_j:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z8get_diagPdS_j(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z30__device_stub__Z8get_diagPdS_jPdS_j, .-_Z30__device_stub__Z8get_diagPdS_jPdS_j
.globl _Z8get_diagPdS_j
.type _Z8get_diagPdS_j, @function
_Z8get_diagPdS_j:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z30__device_stub__Z8get_diagPdS_jPdS_j
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z8get_diagPdS_j, .-_Z8get_diagPdS_j
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z8get_diagPdS_j"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z8get_diagPdS_j(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
/*This file is part of quantumsim. (https://github.com/brianzi/quantumsim)*/
/*(c) 2016 Brian Tarasinski*/
/*Distributed under the GNU GPLv3. See LICENSE.txt or https://www.gnu.org/licenses/gpl.txt*/
//kernel to transform to pauli basis (up, x, y, down)
//to be run on a complete complex density matrix, once for each bit
//this operation is its own inverse (can also be used in opposite direction)
__global__ void get_diag(double *dm9, double *out, unsigned int no_qubits) {
int x = (blockIdx.x *blockDim.x) + threadIdx.x;
if (x >= (1 << no_qubits)) return;
unsigned int addr_real = 0;
for (int i = 0; i < 16; i++) {
addr_real |= (x & 1U << i) << i | (x & 1U << i) << (i + 1);
}
out[x] = dm9[addr_real];
} | #include <hip/hip_runtime.h>
#include "includes.h"
/*This file is part of quantumsim. (https://github.com/brianzi/quantumsim)*/
/*(c) 2016 Brian Tarasinski*/
/*Distributed under the GNU GPLv3. See LICENSE.txt or https://www.gnu.org/licenses/gpl.txt*/
//kernel to transform to pauli basis (up, x, y, down)
//to be run on a complete complex density matrix, once for each bit
//this operation is its own inverse (can also be used in opposite direction)
__global__ void get_diag(double *dm9, double *out, unsigned int no_qubits) {
int x = (blockIdx.x *blockDim.x) + threadIdx.x;
if (x >= (1 << no_qubits)) return;
unsigned int addr_real = 0;
for (int i = 0; i < 16; i++) {
addr_real |= (x & 1U << i) << i | (x & 1U << i) << (i + 1);
}
out[x] = dm9[addr_real];
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
/*This file is part of quantumsim. (https://github.com/brianzi/quantumsim)*/
/*(c) 2016 Brian Tarasinski*/
/*Distributed under the GNU GPLv3. See LICENSE.txt or https://www.gnu.org/licenses/gpl.txt*/
//kernel to transform to pauli basis (up, x, y, down)
//to be run on a complete complex density matrix, once for each bit
//this operation is its own inverse (can also be used in opposite direction)
__global__ void get_diag(double *dm9, double *out, unsigned int no_qubits) {
int x = (blockIdx.x *blockDim.x) + threadIdx.x;
if (x >= (1 << no_qubits)) return;
unsigned int addr_real = 0;
for (int i = 0; i < 16; i++) {
addr_real |= (x & 1U << i) << i | (x & 1U << i) << (i + 1);
}
out[x] = dm9[addr_real];
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z8get_diagPdS_j
.globl _Z8get_diagPdS_j
.p2align 8
.type _Z8get_diagPdS_j,@function
_Z8get_diagPdS_j:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_lshl_b32 s2, 1, s3
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_cmp_gt_i32_e32 vcc_lo, s2, v1
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB0_4
v_mov_b32_e32 v2, 0
s_mov_b32 s2, 0
.LBB0_2:
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_lshl_b32 s3, 1, s2
v_and_b32_e32 v0, s3, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_lshlrev_b32_e32 v3, s2, v0
s_add_i32 s2, s2, 1
v_lshlrev_b32_e32 v0, s2, v0
s_cmp_eq_u32 s2, 16
s_delay_alu instid0(VALU_DEP_1)
v_or3_b32 v2, v0, v2, v3
s_cbranch_scc0 .LBB0_2
s_load_b128 s[0:3], s[0:1], 0x0
v_mov_b32_e32 v3, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 3, v[2:3]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s0, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo
global_load_b64 v[3:4], v[2:3], off
v_ashrrev_i32_e32 v2, 31, v1
v_lshlrev_b64 v[0:1], 3, v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
s_waitcnt vmcnt(0)
global_store_b64 v[0:1], v[3:4], off
.LBB0_4:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z8get_diagPdS_j
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z8get_diagPdS_j, .Lfunc_end0-_Z8get_diagPdS_j
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z8get_diagPdS_j
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z8get_diagPdS_j.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
/*This file is part of quantumsim. (https://github.com/brianzi/quantumsim)*/
/*(c) 2016 Brian Tarasinski*/
/*Distributed under the GNU GPLv3. See LICENSE.txt or https://www.gnu.org/licenses/gpl.txt*/
//kernel to transform to pauli basis (up, x, y, down)
//to be run on a complete complex density matrix, once for each bit
//this operation is its own inverse (can also be used in opposite direction)
__global__ void get_diag(double *dm9, double *out, unsigned int no_qubits) {
int x = (blockIdx.x *blockDim.x) + threadIdx.x;
if (x >= (1 << no_qubits)) return;
unsigned int addr_real = 0;
for (int i = 0; i < 16; i++) {
addr_real |= (x & 1U << i) << i | (x & 1U << i) << (i + 1);
}
out[x] = dm9[addr_real];
} | .text
.file "get_diag.hip"
.globl _Z23__device_stub__get_diagPdS_j # -- Begin function _Z23__device_stub__get_diagPdS_j
.p2align 4, 0x90
.type _Z23__device_stub__get_diagPdS_j,@function
_Z23__device_stub__get_diagPdS_j: # @_Z23__device_stub__get_diagPdS_j
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z8get_diagPdS_j, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z23__device_stub__get_diagPdS_j, .Lfunc_end0-_Z23__device_stub__get_diagPdS_j
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8get_diagPdS_j, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z8get_diagPdS_j,@object # @_Z8get_diagPdS_j
.section .rodata,"a",@progbits
.globl _Z8get_diagPdS_j
.p2align 3, 0x0
_Z8get_diagPdS_j:
.quad _Z23__device_stub__get_diagPdS_j
.size _Z8get_diagPdS_j, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z8get_diagPdS_j"
.size .L__unnamed_1, 17
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z23__device_stub__get_diagPdS_j
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z8get_diagPdS_j
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z8get_diagPdS_j
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ UMOV UR4, 0x1 ; /* 0x0000000100047882 */
/* 0x000fe40000000000 */
/*0030*/ ULDC UR5, c[0x0][0x170] ; /* 0x00005c0000057ab9 */
/* 0x000fe20000000800 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e220000002100 */
/*0050*/ USHF.L.U32 UR4, UR4, UR5, URZ ; /* 0x0000000504047299 */
/* 0x000fe2000800063f */
/*0060*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0070*/ ISETP.GE.AND P0, PT, R0, UR4, PT ; /* 0x0000000400007c0c */
/* 0x000fda000bf06270 */
/*0080*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0090*/ LOP3.LUT R2, R0.reuse, 0x1, RZ, 0xc0, !PT ; /* 0x0000000100027812 */
/* 0x040fe200078ec0ff */
/*00a0*/ IMAD.SHL.U32 R5, R0.reuse, 0x4, RZ ; /* 0x0000000400057824 */
/* 0x040fe200078e00ff */
/*00b0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*00c0*/ IMAD.SHL.U32 R7, R0, 0x8, RZ ; /* 0x0000000800077824 */
/* 0x000fe400078e00ff */
/*00d0*/ IMAD.SHL.U32 R3, R2, 0x2, RZ ; /* 0x0000000202037824 */
/* 0x000fca00078e00ff */
/*00e0*/ LOP3.LUT R2, R3, 0x2, R2, 0xe2, !PT ; /* 0x0000000203027812 */
/* 0x000fe200078ee202 */
/*00f0*/ IMAD.SHL.U32 R3, R0, 0x2, RZ ; /* 0x0000000200037824 */
/* 0x000fc600078e00ff */
/*0100*/ LOP3.LUT R2, R2, 0x8, R5, 0xf8, !PT ; /* 0x0000000802027812 */
/* 0x000fc800078ef805 */
/*0110*/ LOP3.LUT R2, R2, 0x4, R3, 0xf8, !PT ; /* 0x0000000402027812 */
/* 0x000fe200078ef803 */
/*0120*/ IMAD.SHL.U32 R3, R0, 0x10, RZ ; /* 0x0000001000037824 */
/* 0x000fc600078e00ff */
/*0130*/ LOP3.LUT R2, R2, 0x20, R7, 0xf8, !PT ; /* 0x0000002002027812 */
/* 0x000fc800078ef807 */
/*0140*/ LOP3.LUT R2, R2, 0x10, R5, 0xf8, !PT ; /* 0x0000001002027812 */
/* 0x000fe200078ef805 */
/*0150*/ IMAD.SHL.U32 R5, R0, 0x20, RZ ; /* 0x0000002000057824 */
/* 0x000fc600078e00ff */
/*0160*/ LOP3.LUT R2, R2, 0x80, R3, 0xf8, !PT ; /* 0x0000008002027812 */
/* 0x000fc800078ef803 */
/*0170*/ LOP3.LUT R2, R2, 0x40, R7, 0xf8, !PT ; /* 0x0000004002027812 */
/* 0x000fe200078ef807 */
/*0180*/ IMAD.SHL.U32 R7, R0, 0x40, RZ ; /* 0x0000004000077824 */
/* 0x000fc600078e00ff */
/*0190*/ LOP3.LUT R2, R2, 0x200, R5, 0xf8, !PT ; /* 0x0000020002027812 */
/* 0x000fc800078ef805 */
/*01a0*/ LOP3.LUT R2, R2, 0x100, R3, 0xf8, !PT ; /* 0x0000010002027812 */
/* 0x000fe200078ef803 */
/*01b0*/ IMAD.SHL.U32 R3, R0, 0x80, RZ ; /* 0x0000008000037824 */
/* 0x000fc600078e00ff */
/*01c0*/ LOP3.LUT R2, R2, 0x800, R7, 0xf8, !PT ; /* 0x0000080002027812 */
/* 0x000fc800078ef807 */
/*01d0*/ LOP3.LUT R2, R2, 0x400, R5, 0xf8, !PT ; /* 0x0000040002027812 */
/* 0x000fe200078ef805 */
/*01e0*/ IMAD.SHL.U32 R5, R0, 0x100, RZ ; /* 0x0000010000057824 */
/* 0x000fc600078e00ff */
/*01f0*/ LOP3.LUT R2, R2, 0x2000, R3, 0xf8, !PT ; /* 0x0000200002027812 */
/* 0x000fc800078ef803 */
/*0200*/ LOP3.LUT R2, R2, 0x1000, R7, 0xf8, !PT ; /* 0x0000100002027812 */
/* 0x000fe200078ef807 */
/*0210*/ IMAD.SHL.U32 R7, R0, 0x200, RZ ; /* 0x0000020000077824 */
/* 0x000fc600078e00ff */
/*0220*/ LOP3.LUT R2, R2, 0x8000, R5, 0xf8, !PT ; /* 0x0000800002027812 */
/* 0x000fc800078ef805 */
/*0230*/ LOP3.LUT R2, R2, 0x4000, R3, 0xf8, !PT ; /* 0x0000400002027812 */
/* 0x000fe200078ef803 */
/*0240*/ IMAD.SHL.U32 R3, R0, 0x400, RZ ; /* 0x0000040000037824 */
/* 0x000fc600078e00ff */
/*0250*/ LOP3.LUT R2, R2, 0x20000, R7, 0xf8, !PT ; /* 0x0002000002027812 */
/* 0x000fc800078ef807 */
/*0260*/ LOP3.LUT R2, R2, 0x10000, R5, 0xf8, !PT ; /* 0x0001000002027812 */
/* 0x000fe200078ef805 */
/*0270*/ IMAD.SHL.U32 R5, R0, 0x800, RZ ; /* 0x0000080000057824 */
/* 0x000fc600078e00ff */
/*0280*/ LOP3.LUT R2, R2, 0x80000, R3, 0xf8, !PT ; /* 0x0008000002027812 */
/* 0x000fc800078ef803 */
/*0290*/ LOP3.LUT R2, R2, 0x40000, R7, 0xf8, !PT ; /* 0x0004000002027812 */
/* 0x000fe200078ef807 */
/*02a0*/ IMAD.SHL.U32 R7, R0, 0x1000, RZ ; /* 0x0000100000077824 */
/* 0x000fc600078e00ff */
/*02b0*/ LOP3.LUT R2, R2, 0x200000, R5, 0xf8, !PT ; /* 0x0020000002027812 */
/* 0x000fc800078ef805 */
/*02c0*/ LOP3.LUT R2, R2, 0x100000, R3, 0xf8, !PT ; /* 0x0010000002027812 */
/* 0x000fe200078ef803 */
/*02d0*/ IMAD.SHL.U32 R3, R0, 0x2000, RZ ; /* 0x0000200000037824 */
/* 0x000fc600078e00ff */
/*02e0*/ LOP3.LUT R2, R2, 0x800000, R7, 0xf8, !PT ; /* 0x0080000002027812 */
/* 0x000fc800078ef807 */
/*02f0*/ LOP3.LUT R2, R2, 0x400000, R5, 0xf8, !PT ; /* 0x0040000002027812 */
/* 0x000fe200078ef805 */
/*0300*/ IMAD.SHL.U32 R5, R0, 0x4000, RZ ; /* 0x0000400000057824 */
/* 0x000fc600078e00ff */
/*0310*/ LOP3.LUT R2, R2, 0x2000000, R3, 0xf8, !PT ; /* 0x0200000002027812 */
/* 0x000fc800078ef803 */
/*0320*/ LOP3.LUT R2, R2, 0x1000000, R7, 0xf8, !PT ; /* 0x0100000002027812 */
/* 0x000fe200078ef807 */
/*0330*/ IMAD.SHL.U32 R7, R0, 0x8000, RZ ; /* 0x0000800000077824 */
/* 0x000fc600078e00ff */
/*0340*/ LOP3.LUT R2, R2, 0x8000000, R5, 0xf8, !PT ; /* 0x0800000002027812 */
/* 0x000fc800078ef805 */
/*0350*/ LOP3.LUT R2, R2, 0x4000000, R3, 0xf8, !PT ; /* 0x0400000002027812 */
/* 0x000fe200078ef803 */
/*0360*/ IMAD.U32 R3, R0, 0x10000, RZ ; /* 0x0001000000037824 */
/* 0x000fc600078e00ff */
/*0370*/ LOP3.LUT R2, R2, 0x20000000, R7, 0xf8, !PT ; /* 0x2000000002027812 */
/* 0x000fc800078ef807 */
/*0380*/ LOP3.LUT R2, R2, 0x10000000, R5, 0xf8, !PT ; /* 0x1000000002027812 */
/* 0x000fe200078ef805 */
/*0390*/ IMAD.MOV.U32 R5, RZ, RZ, 0x8 ; /* 0x00000008ff057424 */
/* 0x000fc600078e00ff */
/*03a0*/ LOP3.LUT R2, R2, 0x80000000, R3, 0xf8, !PT ; /* 0x8000000002027812 */
/* 0x000fc800078ef803 */
/*03b0*/ LOP3.LUT R2, R2, 0x40000000, R7, 0xf8, !PT ; /* 0x4000000002027812 */
/* 0x000fca00078ef807 */
/*03c0*/ IMAD.WIDE.U32 R2, R2, R5, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fcc00078e0005 */
/*03d0*/ LDG.E.64 R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1b00 */
/*03e0*/ IMAD.WIDE R4, R0, R5, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fca00078e0205 */
/*03f0*/ STG.E.64 [R4.64], R2 ; /* 0x0000000204007986 */
/* 0x004fe2000c101b04 */
/*0400*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0410*/ BRA 0x410; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0420*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0430*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0440*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0450*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0460*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0470*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0480*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0490*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z8get_diagPdS_j
.globl _Z8get_diagPdS_j
.p2align 8
.type _Z8get_diagPdS_j,@function
_Z8get_diagPdS_j:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_lshl_b32 s2, 1, s3
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_cmp_gt_i32_e32 vcc_lo, s2, v1
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB0_4
v_mov_b32_e32 v2, 0
s_mov_b32 s2, 0
.LBB0_2:
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_lshl_b32 s3, 1, s2
v_and_b32_e32 v0, s3, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_lshlrev_b32_e32 v3, s2, v0
s_add_i32 s2, s2, 1
v_lshlrev_b32_e32 v0, s2, v0
s_cmp_eq_u32 s2, 16
s_delay_alu instid0(VALU_DEP_1)
v_or3_b32 v2, v0, v2, v3
s_cbranch_scc0 .LBB0_2
s_load_b128 s[0:3], s[0:1], 0x0
v_mov_b32_e32 v3, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 3, v[2:3]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s0, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo
global_load_b64 v[3:4], v[2:3], off
v_ashrrev_i32_e32 v2, 31, v1
v_lshlrev_b64 v[0:1], 3, v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
s_waitcnt vmcnt(0)
global_store_b64 v[0:1], v[3:4], off
.LBB0_4:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z8get_diagPdS_j
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z8get_diagPdS_j, .Lfunc_end0-_Z8get_diagPdS_j
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z8get_diagPdS_j
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z8get_diagPdS_j.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000f05a9_00000000-6_get_diag.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z30__device_stub__Z8get_diagPdS_jPdS_j
.type _Z30__device_stub__Z8get_diagPdS_jPdS_j, @function
_Z30__device_stub__Z8get_diagPdS_jPdS_j:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z8get_diagPdS_j(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z30__device_stub__Z8get_diagPdS_jPdS_j, .-_Z30__device_stub__Z8get_diagPdS_jPdS_j
.globl _Z8get_diagPdS_j
.type _Z8get_diagPdS_j, @function
_Z8get_diagPdS_j:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z30__device_stub__Z8get_diagPdS_jPdS_j
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z8get_diagPdS_j, .-_Z8get_diagPdS_j
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z8get_diagPdS_j"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z8get_diagPdS_j(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "get_diag.hip"
.globl _Z23__device_stub__get_diagPdS_j # -- Begin function _Z23__device_stub__get_diagPdS_j
.p2align 4, 0x90
.type _Z23__device_stub__get_diagPdS_j,@function
_Z23__device_stub__get_diagPdS_j: # @_Z23__device_stub__get_diagPdS_j
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z8get_diagPdS_j, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z23__device_stub__get_diagPdS_j, .Lfunc_end0-_Z23__device_stub__get_diagPdS_j
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8get_diagPdS_j, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z8get_diagPdS_j,@object # @_Z8get_diagPdS_j
.section .rodata,"a",@progbits
.globl _Z8get_diagPdS_j
.p2align 3, 0x0
_Z8get_diagPdS_j:
.quad _Z23__device_stub__get_diagPdS_j
.size _Z8get_diagPdS_j, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z8get_diagPdS_j"
.size .L__unnamed_1, 17
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z23__device_stub__get_diagPdS_j
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z8get_diagPdS_j
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <stdlib.h>
#include <curand.h>
#include <curand_kernel.h>
#include <cuda_runtime.h>
#include <time.h>
#include <math.h>
#include <stdbool.h>
// cuda macro for ensuring cuda errors are logged
#define __cuda__(ans) { cudaAssert((ans), __FILE__, __LINE__); }
inline void cudaAssert(cudaError_t code, const char *file, int line, bool abort=true) {
if (code != cudaSuccess) {
fprintf(stderr, "CUDA-Assert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/* KERNEL: Set up curand environment for populating matrix with pseudorandom values
*/
__global__ void cuda_rand_init(curandState *state, unsigned int size, int seed) {
for (int idx = blockIdx.x * blockDim.x + threadIdx.x;
idx < size;
idx += blockDim.x * gridDim.x
){
curand_init(seed, idx, 0, &state[idx]);
}
}
/* KERNEL: Populate matrix with pseudorandom values
*/
__global__ void cuda_rand(curandState *state, float *matrix, unsigned int size) {
for (int idx = blockIdx.x * blockDim.x + threadIdx.x;
idx < size;
idx += blockDim.x * gridDim.x
){
matrix[idx] = curand_uniform(&state[idx])*10.0f;
}
}
/* Populate initial solution matrix with pseudorandom values between 0 and 10
*/
void matrixRandomPopulate(float* matrix, int m, int s, int blocks, int threads) {
float* cuda_matrix;
curandState* cuda_state;
__cuda__( cudaMalloc(&cuda_matrix, m*s*sizeof(float)) );
__cuda__( cudaMalloc(&cuda_state, m*s*sizeof(curandState)) );
// initialize curand state with pseudorandom value for different initial pseudorandom solutions across executions
srand(time(NULL));
cuda_rand_init<<<blocks, threads>>>(cuda_state, m*s, (float)rand()/((float)RAND_MAX/10.0f));
// populate initial solution matrix with pseudorandom values
cuda_rand<<<blocks, threads>>>(cuda_state, cuda_matrix, m*s);
__cuda__( cudaMemcpy(matrix, cuda_matrix, m*s*sizeof(float), cudaMemcpyDeviceToHost) );
__cuda__( cudaFree(cuda_matrix) );
__cuda__( cudaFree(cuda_state) );
}
/* Populate initial solution matrix with pseudorandom values between 0 and 10
*/
void matrixRandomPopulateSerial(float* matrix, int m, int s) {
srand(time(NULL));
for (int i = 0; i < (m * s); i++) {
matrix[i] = (float)(rand() % 10); ///rand() / (1.0f + RAND_MAX) * 10.0f;
}
} | .file "tmpxft_0017144f_00000000-6_random.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2276:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2276:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z26matrixRandomPopulateSerialPfii
.type _Z26matrixRandomPopulateSerialPfii, @function
_Z26matrixRandomPopulateSerialPfii:
.LFB2273:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movq %rdi, %rbp
movl %esi, %ebx
movl %edx, %r12d
movl $0, %edi
call time@PLT
movl %eax, %edi
call srand@PLT
movl %ebx, %esi
imull %r12d, %esi
testl %esi, %esi
jle .L3
movq %rbp, %rbx
movslq %esi, %rsi
leaq 0(%rbp,%rsi,4), %rbp
.L5:
call rand@PLT
movslq %eax, %rdx
imulq $1717986919, %rdx, %rdx
sarq $34, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
leal (%rdx,%rdx,4), %edx
addl %edx, %edx
subl %edx, %eax
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L5
.L3:
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2273:
.size _Z26matrixRandomPopulateSerialPfii, .-_Z26matrixRandomPopulateSerialPfii
.globl _Z54__device_stub__Z14cuda_rand_initP17curandStateXORWOWjiP17curandStateXORWOWji
.type _Z54__device_stub__Z14cuda_rand_initP17curandStateXORWOWjiP17curandStateXORWOWji, @function
_Z54__device_stub__Z14cuda_rand_initP17curandStateXORWOWjiP17curandStateXORWOWji:
.LFB2298:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movl %edx, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movq %rsp, %rax
movq %rax, 96(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L12
.L8:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L13
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L12:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z14cuda_rand_initP17curandStateXORWOWji(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L8
.L13:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2298:
.size _Z54__device_stub__Z14cuda_rand_initP17curandStateXORWOWjiP17curandStateXORWOWji, .-_Z54__device_stub__Z14cuda_rand_initP17curandStateXORWOWjiP17curandStateXORWOWji
.globl _Z14cuda_rand_initP17curandStateXORWOWji
.type _Z14cuda_rand_initP17curandStateXORWOWji, @function
_Z14cuda_rand_initP17curandStateXORWOWji:
.LFB2299:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z54__device_stub__Z14cuda_rand_initP17curandStateXORWOWjiP17curandStateXORWOWji
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2299:
.size _Z14cuda_rand_initP17curandStateXORWOWji, .-_Z14cuda_rand_initP17curandStateXORWOWji
.globl _Z49__device_stub__Z9cuda_randP17curandStateXORWOWPfjP17curandStateXORWOWPfj
.type _Z49__device_stub__Z9cuda_randP17curandStateXORWOWPfjP17curandStateXORWOWPfj, @function
_Z49__device_stub__Z9cuda_randP17curandStateXORWOWPfjP17curandStateXORWOWPfj:
.LFB2300:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L20
.L16:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L21
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L20:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9cuda_randP17curandStateXORWOWPfj(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L16
.L21:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2300:
.size _Z49__device_stub__Z9cuda_randP17curandStateXORWOWPfjP17curandStateXORWOWPfj, .-_Z49__device_stub__Z9cuda_randP17curandStateXORWOWPfjP17curandStateXORWOWPfj
.globl _Z9cuda_randP17curandStateXORWOWPfj
.type _Z9cuda_randP17curandStateXORWOWPfj, @function
_Z9cuda_randP17curandStateXORWOWPfj:
.LFB2301:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z49__device_stub__Z9cuda_randP17curandStateXORWOWPfjP17curandStateXORWOWPfj
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2301:
.size _Z9cuda_randP17curandStateXORWOWPfj, .-_Z9cuda_randP17curandStateXORWOWPfj
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "/home/ubuntu/Datasets/stackv2/train-structured/michael-riess/cuda-da-smacof/master/libs/random/random.cu"
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "CUDA-Assert: %s %s %d\n"
.text
.globl _Z20matrixRandomPopulatePfiiii
.type _Z20matrixRandomPopulatePfiiii, @function
_Z20matrixRandomPopulatePfiiii:
.LFB2272:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $72, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %ecx, %r12d
movl %r8d, %r13d
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
imull %edx, %esi
movl %esi, %ebp
movslq %esi, %r14
leaq 0(,%r14,4), %r15
leaq 16(%rsp), %rdi
movq %r15, %rsi
call cudaMalloc@PLT
testl %eax, %eax
jne .L34
leaq (%r14,%r14,2), %rsi
salq $4, %rsi
leaq 24(%rsp), %rdi
call cudaMalloc@PLT
movl %eax, %ebx
testl %eax, %eax
jne .L35
movl $0, %edi
call time@PLT
movl %eax, %edi
call srand@PLT
movl %r13d, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl %r12d, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L36
.L27:
movl %r13d, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl %r12d, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L37
.L28:
movl $2, %ecx
movq %r15, %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %ebx
testl %eax, %eax
jne .L38
movq 16(%rsp), %rdi
call cudaFree@PLT
movl %eax, %ebx
testl %eax, %eax
jne .L39
movq 24(%rsp), %rdi
call cudaFree@PLT
movl %eax, %ebx
testl %eax, %eax
jne .L40
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L41
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L34:
.cfi_restore_state
movl %eax, %ebx
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
movl $50, %r9d
leaq .LC0(%rip), %r8
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl %ebx, %edi
call exit@PLT
.L35:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
movl $51, %r9d
leaq .LC0(%rip), %r8
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl %ebx, %edi
call exit@PLT
.L36:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
divss .LC2(%rip), %xmm0
cvttss2sil %xmm0, %edx
movl %ebp, %esi
movq 24(%rsp), %rdi
call _Z54__device_stub__Z14cuda_rand_initP17curandStateXORWOWjiP17curandStateXORWOWji
jmp .L27
.L37:
movl %ebp, %edx
movq 16(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z49__device_stub__Z9cuda_randP17curandStateXORWOWPfjP17curandStateXORWOWPfj
jmp .L28
.L38:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
movl $59, %r9d
leaq .LC0(%rip), %r8
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl %ebx, %edi
call exit@PLT
.L39:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
movl $60, %r9d
leaq .LC0(%rip), %r8
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl %ebx, %edi
call exit@PLT
.L40:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
movl $61, %r9d
leaq .LC0(%rip), %r8
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl %ebx, %edi
call exit@PLT
.L41:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2272:
.size _Z20matrixRandomPopulatePfiiii, .-_Z20matrixRandomPopulatePfiiii
.section .rodata.str1.8
.align 8
.LC3:
.string "_Z9cuda_randP17curandStateXORWOWPfj"
.align 8
.LC4:
.string "_Z14cuda_rand_initP17curandStateXORWOWji"
.section .rodata.str1.1
.LC5:
.string "precalc_xorwow_matrix"
.LC6:
.string "precalc_xorwow_offset_matrix"
.LC7:
.string "mrg32k3aM1"
.LC8:
.string "mrg32k3aM2"
.LC9:
.string "mrg32k3aM1SubSeq"
.LC10:
.string "mrg32k3aM2SubSeq"
.LC11:
.string "mrg32k3aM1Seq"
.LC12:
.string "mrg32k3aM2Seq"
.LC13:
.string "__cr_lgamma_table"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2303:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z9cuda_randP17curandStateXORWOWPfj(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z14cuda_rand_initP17curandStateXORWOWji(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $102400, %r9d
movl $0, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _ZL21precalc_xorwow_matrix(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $102400, %r9d
movl $0, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _ZL28precalc_xorwow_offset_matrix(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq _ZL10mrg32k3aM1(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _ZL10mrg32k3aM2(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2016, %r9d
movl $0, %r8d
leaq .LC9(%rip), %rdx
movq %rdx, %rcx
leaq _ZL16mrg32k3aM1SubSeq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2016, %r9d
movl $0, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _ZL16mrg32k3aM2SubSeq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC11(%rip), %rdx
movq %rdx, %rcx
leaq _ZL13mrg32k3aM1Seq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC12(%rip), %rdx
movq %rdx, %rcx
leaq _ZL13mrg32k3aM2Seq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $72, %r9d
movl $0, %r8d
leaq .LC13(%rip), %rdx
movq %rdx, %rcx
leaq _ZL17__cr_lgamma_table(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2303:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL17__cr_lgamma_table
.comm _ZL17__cr_lgamma_table,72,32
.local _ZL13mrg32k3aM2Seq
.comm _ZL13mrg32k3aM2Seq,2304,32
.local _ZL13mrg32k3aM1Seq
.comm _ZL13mrg32k3aM1Seq,2304,32
.local _ZL16mrg32k3aM2SubSeq
.comm _ZL16mrg32k3aM2SubSeq,2016,32
.local _ZL16mrg32k3aM1SubSeq
.comm _ZL16mrg32k3aM1SubSeq,2016,32
.local _ZL10mrg32k3aM2
.comm _ZL10mrg32k3aM2,2304,32
.local _ZL10mrg32k3aM1
.comm _ZL10mrg32k3aM1,2304,32
.local _ZL28precalc_xorwow_offset_matrix
.comm _ZL28precalc_xorwow_offset_matrix,102400,32
.local _ZL21precalc_xorwow_matrix
.comm _ZL21precalc_xorwow_matrix,102400,32
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC2:
.long 1296878797
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <stdlib.h>
#include <curand.h>
#include <curand_kernel.h>
#include <cuda_runtime.h>
#include <time.h>
#include <math.h>
#include <stdbool.h>
// cuda macro for ensuring cuda errors are logged
#define __cuda__(ans) { cudaAssert((ans), __FILE__, __LINE__); }
inline void cudaAssert(cudaError_t code, const char *file, int line, bool abort=true) {
if (code != cudaSuccess) {
fprintf(stderr, "CUDA-Assert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/* KERNEL: Set up curand environment for populating matrix with pseudorandom values
*/
__global__ void cuda_rand_init(curandState *state, unsigned int size, int seed) {
for (int idx = blockIdx.x * blockDim.x + threadIdx.x;
idx < size;
idx += blockDim.x * gridDim.x
){
curand_init(seed, idx, 0, &state[idx]);
}
}
/* KERNEL: Populate matrix with pseudorandom values
*/
__global__ void cuda_rand(curandState *state, float *matrix, unsigned int size) {
for (int idx = blockIdx.x * blockDim.x + threadIdx.x;
idx < size;
idx += blockDim.x * gridDim.x
){
matrix[idx] = curand_uniform(&state[idx])*10.0f;
}
}
/* Populate initial solution matrix with pseudorandom values between 0 and 10
*/
void matrixRandomPopulate(float* matrix, int m, int s, int blocks, int threads) {
float* cuda_matrix;
curandState* cuda_state;
__cuda__( cudaMalloc(&cuda_matrix, m*s*sizeof(float)) );
__cuda__( cudaMalloc(&cuda_state, m*s*sizeof(curandState)) );
// initialize curand state with pseudorandom value for different initial pseudorandom solutions across executions
srand(time(NULL));
cuda_rand_init<<<blocks, threads>>>(cuda_state, m*s, (float)rand()/((float)RAND_MAX/10.0f));
// populate initial solution matrix with pseudorandom values
cuda_rand<<<blocks, threads>>>(cuda_state, cuda_matrix, m*s);
__cuda__( cudaMemcpy(matrix, cuda_matrix, m*s*sizeof(float), cudaMemcpyDeviceToHost) );
__cuda__( cudaFree(cuda_matrix) );
__cuda__( cudaFree(cuda_state) );
}
/* Populate initial solution matrix with pseudorandom values between 0 and 10
*/
void matrixRandomPopulateSerial(float* matrix, int m, int s) {
srand(time(NULL));
for (int i = 0; i < (m * s); i++) {
matrix[i] = (float)(rand() % 10); ///rand() / (1.0f + RAND_MAX) * 10.0f;
}
} | #include <stdio.h>
#include <stdlib.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <hip/hip_runtime.h>
#include <time.h>
#include <math.h>
#include <stdbool.h>
// cuda macro for ensuring cuda errors are logged
#define __cuda__(ans) { cudaAssert((ans), __FILE__, __LINE__); }
inline void cudaAssert(hipError_t code, const char *file, int line, bool abort=true) {
if (code != hipSuccess) {
fprintf(stderr, "CUDA-Assert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/* KERNEL: Set up curand environment for populating matrix with pseudorandom values
*/
__global__ void cuda_rand_init(hiprandState *state, unsigned int size, int seed) {
for (int idx = blockIdx.x * blockDim.x + threadIdx.x;
idx < size;
idx += blockDim.x * gridDim.x
){
hiprand_init(seed, idx, 0, &state[idx]);
}
}
/* KERNEL: Populate matrix with pseudorandom values
*/
__global__ void cuda_rand(hiprandState *state, float *matrix, unsigned int size) {
for (int idx = blockIdx.x * blockDim.x + threadIdx.x;
idx < size;
idx += blockDim.x * gridDim.x
){
matrix[idx] = hiprand_uniform(&state[idx])*10.0f;
}
}
/* Populate initial solution matrix with pseudorandom values between 0 and 10
*/
void matrixRandomPopulate(float* matrix, int m, int s, int blocks, int threads) {
float* cuda_matrix;
hiprandState* cuda_state;
__cuda__( hipMalloc(&cuda_matrix, m*s*sizeof(float)) );
__cuda__( hipMalloc(&cuda_state, m*s*sizeof(hiprandState)) );
// initialize curand state with pseudorandom value for different initial pseudorandom solutions across executions
srand(time(NULL));
cuda_rand_init<<<blocks, threads>>>(cuda_state, m*s, (float)rand()/((float)RAND_MAX/10.0f));
// populate initial solution matrix with pseudorandom values
cuda_rand<<<blocks, threads>>>(cuda_state, cuda_matrix, m*s);
__cuda__( hipMemcpy(matrix, cuda_matrix, m*s*sizeof(float), hipMemcpyDeviceToHost) );
__cuda__( hipFree(cuda_matrix) );
__cuda__( hipFree(cuda_state) );
}
/* Populate initial solution matrix with pseudorandom values between 0 and 10
*/
void matrixRandomPopulateSerial(float* matrix, int m, int s) {
srand(time(NULL));
for (int i = 0; i < (m * s); i++) {
matrix[i] = (float)(rand() % 10); ///rand() / (1.0f + RAND_MAX) * 10.0f;
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <stdio.h>
#include <stdlib.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <hip/hip_runtime.h>
#include <time.h>
#include <math.h>
#include <stdbool.h>
// cuda macro for ensuring cuda errors are logged
#define __cuda__(ans) { cudaAssert((ans), __FILE__, __LINE__); }
inline void cudaAssert(hipError_t code, const char *file, int line, bool abort=true) {
if (code != hipSuccess) {
fprintf(stderr, "CUDA-Assert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/* KERNEL: Set up curand environment for populating matrix with pseudorandom values
*/
__global__ void cuda_rand_init(hiprandState *state, unsigned int size, int seed) {
for (int idx = blockIdx.x * blockDim.x + threadIdx.x;
idx < size;
idx += blockDim.x * gridDim.x
){
hiprand_init(seed, idx, 0, &state[idx]);
}
}
/* KERNEL: Populate matrix with pseudorandom values
*/
__global__ void cuda_rand(hiprandState *state, float *matrix, unsigned int size) {
for (int idx = blockIdx.x * blockDim.x + threadIdx.x;
idx < size;
idx += blockDim.x * gridDim.x
){
matrix[idx] = hiprand_uniform(&state[idx])*10.0f;
}
}
/* Populate initial solution matrix with pseudorandom values between 0 and 10
*/
void matrixRandomPopulate(float* matrix, int m, int s, int blocks, int threads) {
float* cuda_matrix;
hiprandState* cuda_state;
__cuda__( hipMalloc(&cuda_matrix, m*s*sizeof(float)) );
__cuda__( hipMalloc(&cuda_state, m*s*sizeof(hiprandState)) );
// initialize curand state with pseudorandom value for different initial pseudorandom solutions across executions
srand(time(NULL));
cuda_rand_init<<<blocks, threads>>>(cuda_state, m*s, (float)rand()/((float)RAND_MAX/10.0f));
// populate initial solution matrix with pseudorandom values
cuda_rand<<<blocks, threads>>>(cuda_state, cuda_matrix, m*s);
__cuda__( hipMemcpy(matrix, cuda_matrix, m*s*sizeof(float), hipMemcpyDeviceToHost) );
__cuda__( hipFree(cuda_matrix) );
__cuda__( hipFree(cuda_state) );
}
/* Populate initial solution matrix with pseudorandom values between 0 and 10
*/
void matrixRandomPopulateSerial(float* matrix, int m, int s) {
srand(time(NULL));
for (int i = 0; i < (m * s); i++) {
matrix[i] = (float)(rand() % 10); ///rand() / (1.0f + RAND_MAX) * 10.0f;
}
} | .text
.file "random.hip"
.globl _Z29__device_stub__cuda_rand_initP12hiprandStateji # -- Begin function _Z29__device_stub__cuda_rand_initP12hiprandStateji
.p2align 4, 0x90
.type _Z29__device_stub__cuda_rand_initP12hiprandStateji,@function
_Z29__device_stub__cuda_rand_initP12hiprandStateji: # @_Z29__device_stub__cuda_rand_initP12hiprandStateji
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
movl %edx, (%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
movq %rsp, %rax
movq %rax, 80(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z14cuda_rand_initP12hiprandStateji, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z29__device_stub__cuda_rand_initP12hiprandStateji, .Lfunc_end0-_Z29__device_stub__cuda_rand_initP12hiprandStateji
.cfi_endproc
# -- End function
.globl _Z24__device_stub__cuda_randP12hiprandStatePfj # -- Begin function _Z24__device_stub__cuda_randP12hiprandStatePfj
.p2align 4, 0x90
.type _Z24__device_stub__cuda_randP12hiprandStatePfj,@function
_Z24__device_stub__cuda_randP12hiprandStatePfj: # @_Z24__device_stub__cuda_randP12hiprandStatePfj
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9cuda_randP12hiprandStatePfj, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end1:
.size _Z24__device_stub__cuda_randP12hiprandStatePfj, .Lfunc_end1-_Z24__device_stub__cuda_randP12hiprandStatePfj
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z20matrixRandomPopulatePfiiii
.LCPI2_0:
.long 0x4d4ccccd # float 214748368
.text
.globl _Z20matrixRandomPopulatePfiiii
.p2align 4, 0x90
.type _Z20matrixRandomPopulatePfiiii,@function
_Z20matrixRandomPopulatePfiiii: # @_Z20matrixRandomPopulatePfiiii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $120, %rsp
.cfi_def_cfa_offset 176
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %r8d, %r12d
movl %ecx, %r15d
movl %esi, %ebp
movq %rdi, %rbx
imull %edx, %ebp
movslq %ebp, %r13
leaq (,%r13,4), %r14
leaq 24(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB2_1
# %bb.3: # %_Z10cudaAssert10hipError_tPKcib.exit
shlq $4, %r13
leaq (,%r13,2), %rsi
addq %r13, %rsi
leaq 16(%rsp), %rdi
callq hipMalloc
testl %eax, %eax
jne .LBB2_4
# %bb.5: # %_Z10cudaAssert10hipError_tPKcib.exit22
xorl %edi, %edi
callq time
movl %eax, %edi
callq srand
movl %r15d, %r15d
movabsq $4294967296, %rax # imm = 0x100000000
orq %rax, %r15
movl %r12d, %r12d
orq %rax, %r12
movq %r15, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_7
# %bb.6:
movq 16(%rsp), %r13
callq rand
cvtsi2ss %eax, %xmm0
divss .LCPI2_0(%rip), %xmm0
cvttss2si %xmm0, %eax
movq %r13, 88(%rsp)
movl %ebp, 32(%rsp)
movl %eax, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 32(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 80(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z14cuda_rand_initP12hiprandStateji, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 88(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_7:
movq %r15, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_9
# %bb.8:
movq 16(%rsp), %rax
movq 24(%rsp), %rcx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movl %ebp, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z9cuda_randP12hiprandStatePfj, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_9:
movq 24(%rsp), %rsi
movq %rbx, %rdi
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB2_10
# %bb.11: # %_Z10cudaAssert10hipError_tPKcib.exit30
movq 24(%rsp), %rdi
callq hipFree
testl %eax, %eax
jne .LBB2_12
# %bb.13: # %_Z10cudaAssert10hipError_tPKcib.exit32
movq 16(%rsp), %rdi
callq hipFree
testl %eax, %eax
jne .LBB2_14
# %bb.15: # %_Z10cudaAssert10hipError_tPKcib.exit34
addq $120, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB2_1:
.cfi_def_cfa_offset 176
movq stderr(%rip), %rbx
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str, %ecx
movq %rbx, %rdi
movq %rax, %rdx
movl $50, %r8d
jmp .LBB2_2
.LBB2_4:
movq stderr(%rip), %rbx
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str, %ecx
movq %rbx, %rdi
movq %rax, %rdx
movl $51, %r8d
jmp .LBB2_2
.LBB2_10:
movq stderr(%rip), %rbx
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str, %ecx
movq %rbx, %rdi
movq %rax, %rdx
movl $59, %r8d
jmp .LBB2_2
.LBB2_12:
movq stderr(%rip), %rbx
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str, %ecx
movq %rbx, %rdi
movq %rax, %rdx
movl $60, %r8d
jmp .LBB2_2
.LBB2_14:
movq stderr(%rip), %rbx
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str, %ecx
movq %rbx, %rdi
movq %rax, %rdx
movl $61, %r8d
.LBB2_2:
xorl %eax, %eax
callq fprintf
movl %ebp, %edi
callq exit
.Lfunc_end2:
.size _Z20matrixRandomPopulatePfiiii, .Lfunc_end2-_Z20matrixRandomPopulatePfiiii
.cfi_endproc
# -- End function
.globl _Z26matrixRandomPopulateSerialPfii # -- Begin function _Z26matrixRandomPopulateSerialPfii
.p2align 4, 0x90
.type _Z26matrixRandomPopulateSerialPfii,@function
_Z26matrixRandomPopulateSerialPfii: # @_Z26matrixRandomPopulateSerialPfii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
pushq %rax
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %edx, %r14d
movl %esi, %ebp
movq %rdi, %rbx
xorl %edi, %edi
callq time
movl %eax, %edi
callq srand
imull %r14d, %ebp
testl %ebp, %ebp
jle .LBB3_3
# %bb.1: # %.lr.ph.preheader
movl %ebp, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB3_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $1717986919, %rax, %rcx # imm = 0x66666667
movq %rcx, %rdx
shrq $63, %rdx
sarq $34, %rcx
addl %edx, %ecx
addl %ecx, %ecx
leal (%rcx,%rcx,4), %ecx
subl %ecx, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%rbx,%r15,4)
incq %r15
cmpq %r15, %r14
jne .LBB3_2
.LBB3_3: # %._crit_edge
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size _Z26matrixRandomPopulateSerialPfii, .Lfunc_end3-_Z26matrixRandomPopulateSerialPfii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z14cuda_rand_initP12hiprandStateji, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9cuda_randP12hiprandStatePfj, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z14cuda_rand_initP12hiprandStateji,@object # @_Z14cuda_rand_initP12hiprandStateji
.section .rodata,"a",@progbits
.globl _Z14cuda_rand_initP12hiprandStateji
.p2align 3, 0x0
_Z14cuda_rand_initP12hiprandStateji:
.quad _Z29__device_stub__cuda_rand_initP12hiprandStateji
.size _Z14cuda_rand_initP12hiprandStateji, 8
.type _Z9cuda_randP12hiprandStatePfj,@object # @_Z9cuda_randP12hiprandStatePfj
.globl _Z9cuda_randP12hiprandStatePfj
.p2align 3, 0x0
_Z9cuda_randP12hiprandStatePfj:
.quad _Z24__device_stub__cuda_randP12hiprandStatePfj
.size _Z9cuda_randP12hiprandStatePfj, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/michael-riess/cuda-da-smacof/master/libs/random/random.hip"
.size .L.str, 116
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "CUDA-Assert: %s %s %d\n"
.size .L.str.1, 23
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z14cuda_rand_initP12hiprandStateji"
.size .L__unnamed_1, 36
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z9cuda_randP12hiprandStatePfj"
.size .L__unnamed_2, 31
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z29__device_stub__cuda_rand_initP12hiprandStateji
.addrsig_sym _Z24__device_stub__cuda_randP12hiprandStatePfj
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z14cuda_rand_initP12hiprandStateji
.addrsig_sym _Z9cuda_randP12hiprandStatePfj
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0017144f_00000000-6_random.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2276:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2276:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z26matrixRandomPopulateSerialPfii
.type _Z26matrixRandomPopulateSerialPfii, @function
_Z26matrixRandomPopulateSerialPfii:
.LFB2273:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movq %rdi, %rbp
movl %esi, %ebx
movl %edx, %r12d
movl $0, %edi
call time@PLT
movl %eax, %edi
call srand@PLT
movl %ebx, %esi
imull %r12d, %esi
testl %esi, %esi
jle .L3
movq %rbp, %rbx
movslq %esi, %rsi
leaq 0(%rbp,%rsi,4), %rbp
.L5:
call rand@PLT
movslq %eax, %rdx
imulq $1717986919, %rdx, %rdx
sarq $34, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
leal (%rdx,%rdx,4), %edx
addl %edx, %edx
subl %edx, %eax
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L5
.L3:
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2273:
.size _Z26matrixRandomPopulateSerialPfii, .-_Z26matrixRandomPopulateSerialPfii
.globl _Z54__device_stub__Z14cuda_rand_initP17curandStateXORWOWjiP17curandStateXORWOWji
.type _Z54__device_stub__Z14cuda_rand_initP17curandStateXORWOWjiP17curandStateXORWOWji, @function
_Z54__device_stub__Z14cuda_rand_initP17curandStateXORWOWjiP17curandStateXORWOWji:
.LFB2298:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movl %edx, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movq %rsp, %rax
movq %rax, 96(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L12
.L8:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L13
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L12:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z14cuda_rand_initP17curandStateXORWOWji(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L8
.L13:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2298:
.size _Z54__device_stub__Z14cuda_rand_initP17curandStateXORWOWjiP17curandStateXORWOWji, .-_Z54__device_stub__Z14cuda_rand_initP17curandStateXORWOWjiP17curandStateXORWOWji
.globl _Z14cuda_rand_initP17curandStateXORWOWji
.type _Z14cuda_rand_initP17curandStateXORWOWji, @function
_Z14cuda_rand_initP17curandStateXORWOWji:
.LFB2299:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z54__device_stub__Z14cuda_rand_initP17curandStateXORWOWjiP17curandStateXORWOWji
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2299:
.size _Z14cuda_rand_initP17curandStateXORWOWji, .-_Z14cuda_rand_initP17curandStateXORWOWji
.globl _Z49__device_stub__Z9cuda_randP17curandStateXORWOWPfjP17curandStateXORWOWPfj
.type _Z49__device_stub__Z9cuda_randP17curandStateXORWOWPfjP17curandStateXORWOWPfj, @function
_Z49__device_stub__Z9cuda_randP17curandStateXORWOWPfjP17curandStateXORWOWPfj:
.LFB2300:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L20
.L16:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L21
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L20:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9cuda_randP17curandStateXORWOWPfj(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L16
.L21:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2300:
.size _Z49__device_stub__Z9cuda_randP17curandStateXORWOWPfjP17curandStateXORWOWPfj, .-_Z49__device_stub__Z9cuda_randP17curandStateXORWOWPfjP17curandStateXORWOWPfj
.globl _Z9cuda_randP17curandStateXORWOWPfj
.type _Z9cuda_randP17curandStateXORWOWPfj, @function
_Z9cuda_randP17curandStateXORWOWPfj:
.LFB2301:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z49__device_stub__Z9cuda_randP17curandStateXORWOWPfjP17curandStateXORWOWPfj
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2301:
.size _Z9cuda_randP17curandStateXORWOWPfj, .-_Z9cuda_randP17curandStateXORWOWPfj
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "/home/ubuntu/Datasets/stackv2/train-structured/michael-riess/cuda-da-smacof/master/libs/random/random.cu"
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "CUDA-Assert: %s %s %d\n"
.text
.globl _Z20matrixRandomPopulatePfiiii
.type _Z20matrixRandomPopulatePfiiii, @function
_Z20matrixRandomPopulatePfiiii:
.LFB2272:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $72, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %ecx, %r12d
movl %r8d, %r13d
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
imull %edx, %esi
movl %esi, %ebp
movslq %esi, %r14
leaq 0(,%r14,4), %r15
leaq 16(%rsp), %rdi
movq %r15, %rsi
call cudaMalloc@PLT
testl %eax, %eax
jne .L34
leaq (%r14,%r14,2), %rsi
salq $4, %rsi
leaq 24(%rsp), %rdi
call cudaMalloc@PLT
movl %eax, %ebx
testl %eax, %eax
jne .L35
movl $0, %edi
call time@PLT
movl %eax, %edi
call srand@PLT
movl %r13d, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl %r12d, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L36
.L27:
movl %r13d, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl %r12d, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L37
.L28:
movl $2, %ecx
movq %r15, %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %ebx
testl %eax, %eax
jne .L38
movq 16(%rsp), %rdi
call cudaFree@PLT
movl %eax, %ebx
testl %eax, %eax
jne .L39
movq 24(%rsp), %rdi
call cudaFree@PLT
movl %eax, %ebx
testl %eax, %eax
jne .L40
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L41
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L34:
.cfi_restore_state
movl %eax, %ebx
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
movl $50, %r9d
leaq .LC0(%rip), %r8
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl %ebx, %edi
call exit@PLT
.L35:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
movl $51, %r9d
leaq .LC0(%rip), %r8
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl %ebx, %edi
call exit@PLT
.L36:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
divss .LC2(%rip), %xmm0
cvttss2sil %xmm0, %edx
movl %ebp, %esi
movq 24(%rsp), %rdi
call _Z54__device_stub__Z14cuda_rand_initP17curandStateXORWOWjiP17curandStateXORWOWji
jmp .L27
.L37:
movl %ebp, %edx
movq 16(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z49__device_stub__Z9cuda_randP17curandStateXORWOWPfjP17curandStateXORWOWPfj
jmp .L28
.L38:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
movl $59, %r9d
leaq .LC0(%rip), %r8
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl %ebx, %edi
call exit@PLT
.L39:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
movl $60, %r9d
leaq .LC0(%rip), %r8
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl %ebx, %edi
call exit@PLT
.L40:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
movl $61, %r9d
leaq .LC0(%rip), %r8
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl %ebx, %edi
call exit@PLT
.L41:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2272:
.size _Z20matrixRandomPopulatePfiiii, .-_Z20matrixRandomPopulatePfiiii
.section .rodata.str1.8
.align 8
.LC3:
.string "_Z9cuda_randP17curandStateXORWOWPfj"
.align 8
.LC4:
.string "_Z14cuda_rand_initP17curandStateXORWOWji"
.section .rodata.str1.1
.LC5:
.string "precalc_xorwow_matrix"
.LC6:
.string "precalc_xorwow_offset_matrix"
.LC7:
.string "mrg32k3aM1"
.LC8:
.string "mrg32k3aM2"
.LC9:
.string "mrg32k3aM1SubSeq"
.LC10:
.string "mrg32k3aM2SubSeq"
.LC11:
.string "mrg32k3aM1Seq"
.LC12:
.string "mrg32k3aM2Seq"
.LC13:
.string "__cr_lgamma_table"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2303:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z9cuda_randP17curandStateXORWOWPfj(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z14cuda_rand_initP17curandStateXORWOWji(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $102400, %r9d
movl $0, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _ZL21precalc_xorwow_matrix(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $102400, %r9d
movl $0, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _ZL28precalc_xorwow_offset_matrix(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq _ZL10mrg32k3aM1(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _ZL10mrg32k3aM2(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2016, %r9d
movl $0, %r8d
leaq .LC9(%rip), %rdx
movq %rdx, %rcx
leaq _ZL16mrg32k3aM1SubSeq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2016, %r9d
movl $0, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _ZL16mrg32k3aM2SubSeq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC11(%rip), %rdx
movq %rdx, %rcx
leaq _ZL13mrg32k3aM1Seq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC12(%rip), %rdx
movq %rdx, %rcx
leaq _ZL13mrg32k3aM2Seq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $72, %r9d
movl $0, %r8d
leaq .LC13(%rip), %rdx
movq %rdx, %rcx
leaq _ZL17__cr_lgamma_table(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2303:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL17__cr_lgamma_table
.comm _ZL17__cr_lgamma_table,72,32
.local _ZL13mrg32k3aM2Seq
.comm _ZL13mrg32k3aM2Seq,2304,32
.local _ZL13mrg32k3aM1Seq
.comm _ZL13mrg32k3aM1Seq,2304,32
.local _ZL16mrg32k3aM2SubSeq
.comm _ZL16mrg32k3aM2SubSeq,2016,32
.local _ZL16mrg32k3aM1SubSeq
.comm _ZL16mrg32k3aM1SubSeq,2016,32
.local _ZL10mrg32k3aM2
.comm _ZL10mrg32k3aM2,2304,32
.local _ZL10mrg32k3aM1
.comm _ZL10mrg32k3aM1,2304,32
.local _ZL28precalc_xorwow_offset_matrix
.comm _ZL28precalc_xorwow_offset_matrix,102400,32
.local _ZL21precalc_xorwow_matrix
.comm _ZL21precalc_xorwow_matrix,102400,32
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC2:
.long 1296878797
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "random.hip"
.globl _Z29__device_stub__cuda_rand_initP12hiprandStateji # -- Begin function _Z29__device_stub__cuda_rand_initP12hiprandStateji
.p2align 4, 0x90
.type _Z29__device_stub__cuda_rand_initP12hiprandStateji,@function
_Z29__device_stub__cuda_rand_initP12hiprandStateji: # @_Z29__device_stub__cuda_rand_initP12hiprandStateji
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
movl %edx, (%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
movq %rsp, %rax
movq %rax, 80(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z14cuda_rand_initP12hiprandStateji, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z29__device_stub__cuda_rand_initP12hiprandStateji, .Lfunc_end0-_Z29__device_stub__cuda_rand_initP12hiprandStateji
.cfi_endproc
# -- End function
.globl _Z24__device_stub__cuda_randP12hiprandStatePfj # -- Begin function _Z24__device_stub__cuda_randP12hiprandStatePfj
.p2align 4, 0x90
.type _Z24__device_stub__cuda_randP12hiprandStatePfj,@function
_Z24__device_stub__cuda_randP12hiprandStatePfj: # @_Z24__device_stub__cuda_randP12hiprandStatePfj
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9cuda_randP12hiprandStatePfj, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end1:
.size _Z24__device_stub__cuda_randP12hiprandStatePfj, .Lfunc_end1-_Z24__device_stub__cuda_randP12hiprandStatePfj
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z20matrixRandomPopulatePfiiii
.LCPI2_0:
.long 0x4d4ccccd # float 214748368
.text
.globl _Z20matrixRandomPopulatePfiiii
.p2align 4, 0x90
.type _Z20matrixRandomPopulatePfiiii,@function
_Z20matrixRandomPopulatePfiiii: # @_Z20matrixRandomPopulatePfiiii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $120, %rsp
.cfi_def_cfa_offset 176
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %r8d, %r12d
movl %ecx, %r15d
movl %esi, %ebp
movq %rdi, %rbx
imull %edx, %ebp
movslq %ebp, %r13
leaq (,%r13,4), %r14
leaq 24(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB2_1
# %bb.3: # %_Z10cudaAssert10hipError_tPKcib.exit
shlq $4, %r13
leaq (,%r13,2), %rsi
addq %r13, %rsi
leaq 16(%rsp), %rdi
callq hipMalloc
testl %eax, %eax
jne .LBB2_4
# %bb.5: # %_Z10cudaAssert10hipError_tPKcib.exit22
xorl %edi, %edi
callq time
movl %eax, %edi
callq srand
movl %r15d, %r15d
movabsq $4294967296, %rax # imm = 0x100000000
orq %rax, %r15
movl %r12d, %r12d
orq %rax, %r12
movq %r15, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_7
# %bb.6:
movq 16(%rsp), %r13
callq rand
cvtsi2ss %eax, %xmm0
divss .LCPI2_0(%rip), %xmm0
cvttss2si %xmm0, %eax
movq %r13, 88(%rsp)
movl %ebp, 32(%rsp)
movl %eax, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 32(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 80(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z14cuda_rand_initP12hiprandStateji, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 88(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_7:
movq %r15, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_9
# %bb.8:
movq 16(%rsp), %rax
movq 24(%rsp), %rcx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movl %ebp, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z9cuda_randP12hiprandStatePfj, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_9:
movq 24(%rsp), %rsi
movq %rbx, %rdi
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB2_10
# %bb.11: # %_Z10cudaAssert10hipError_tPKcib.exit30
movq 24(%rsp), %rdi
callq hipFree
testl %eax, %eax
jne .LBB2_12
# %bb.13: # %_Z10cudaAssert10hipError_tPKcib.exit32
movq 16(%rsp), %rdi
callq hipFree
testl %eax, %eax
jne .LBB2_14
# %bb.15: # %_Z10cudaAssert10hipError_tPKcib.exit34
addq $120, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB2_1:
.cfi_def_cfa_offset 176
movq stderr(%rip), %rbx
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str, %ecx
movq %rbx, %rdi
movq %rax, %rdx
movl $50, %r8d
jmp .LBB2_2
.LBB2_4:
movq stderr(%rip), %rbx
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str, %ecx
movq %rbx, %rdi
movq %rax, %rdx
movl $51, %r8d
jmp .LBB2_2
.LBB2_10:
movq stderr(%rip), %rbx
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str, %ecx
movq %rbx, %rdi
movq %rax, %rdx
movl $59, %r8d
jmp .LBB2_2
.LBB2_12:
movq stderr(%rip), %rbx
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str, %ecx
movq %rbx, %rdi
movq %rax, %rdx
movl $60, %r8d
jmp .LBB2_2
.LBB2_14:
movq stderr(%rip), %rbx
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str, %ecx
movq %rbx, %rdi
movq %rax, %rdx
movl $61, %r8d
.LBB2_2:
xorl %eax, %eax
callq fprintf
movl %ebp, %edi
callq exit
.Lfunc_end2:
.size _Z20matrixRandomPopulatePfiiii, .Lfunc_end2-_Z20matrixRandomPopulatePfiiii
.cfi_endproc
# -- End function
.globl _Z26matrixRandomPopulateSerialPfii # -- Begin function _Z26matrixRandomPopulateSerialPfii
.p2align 4, 0x90
.type _Z26matrixRandomPopulateSerialPfii,@function
_Z26matrixRandomPopulateSerialPfii: # @_Z26matrixRandomPopulateSerialPfii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
pushq %rax
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %edx, %r14d
movl %esi, %ebp
movq %rdi, %rbx
xorl %edi, %edi
callq time
movl %eax, %edi
callq srand
imull %r14d, %ebp
testl %ebp, %ebp
jle .LBB3_3
# %bb.1: # %.lr.ph.preheader
movl %ebp, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB3_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $1717986919, %rax, %rcx # imm = 0x66666667
movq %rcx, %rdx
shrq $63, %rdx
sarq $34, %rcx
addl %edx, %ecx
addl %ecx, %ecx
leal (%rcx,%rcx,4), %ecx
subl %ecx, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%rbx,%r15,4)
incq %r15
cmpq %r15, %r14
jne .LBB3_2
.LBB3_3: # %._crit_edge
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size _Z26matrixRandomPopulateSerialPfii, .Lfunc_end3-_Z26matrixRandomPopulateSerialPfii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z14cuda_rand_initP12hiprandStateji, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9cuda_randP12hiprandStatePfj, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z14cuda_rand_initP12hiprandStateji,@object # @_Z14cuda_rand_initP12hiprandStateji
.section .rodata,"a",@progbits
.globl _Z14cuda_rand_initP12hiprandStateji
.p2align 3, 0x0
_Z14cuda_rand_initP12hiprandStateji:
.quad _Z29__device_stub__cuda_rand_initP12hiprandStateji
.size _Z14cuda_rand_initP12hiprandStateji, 8
.type _Z9cuda_randP12hiprandStatePfj,@object # @_Z9cuda_randP12hiprandStatePfj
.globl _Z9cuda_randP12hiprandStatePfj
.p2align 3, 0x0
_Z9cuda_randP12hiprandStatePfj:
.quad _Z24__device_stub__cuda_randP12hiprandStatePfj
.size _Z9cuda_randP12hiprandStatePfj, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/michael-riess/cuda-da-smacof/master/libs/random/random.hip"
.size .L.str, 116
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "CUDA-Assert: %s %s %d\n"
.size .L.str.1, 23
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z14cuda_rand_initP12hiprandStateji"
.size .L__unnamed_1, 36
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z9cuda_randP12hiprandStatePfj"
.size .L__unnamed_2, 31
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z29__device_stub__cuda_rand_initP12hiprandStateji
.addrsig_sym _Z24__device_stub__cuda_randP12hiprandStatePfj
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z14cuda_rand_initP12hiprandStateji
.addrsig_sym _Z9cuda_randP12hiprandStatePfj
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /* File: cuda_wtime.cu */
/* Description: a timer that reports the current wall time */
/* */
/* Author: K Sujith Bhatt */
/* Chirag K */
/* National Institute of Technology Karnataka */
/* ------------------------------------------------------------------------- */
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
/* Return the current wall-clock time in seconds as a double: whole seconds
 * since the Unix epoch plus microseconds folded in as a fraction.
 *
 * On the (rare) failure of gettimeofday() a diagnostic is printed to stderr
 * via perror() and the value computed from the uninitialized timeval is
 * still returned, so callers should treat a diagnostic on stderr as
 * invalidating the result. */
double wtime(void)
{
    double now_time;
    struct timeval etstart;
    struct timezone tzp;

    if (gettimeofday(&etstart, &tzp) == -1)
        /* No trailing '\n' in the message: perror() appends
         * ": <errno text>\n" itself, so an embedded newline would split
         * the diagnostic across two lines. */
        perror("Error: calling gettimeofday() not successful.");

    now_time = ((double)etstart.tv_sec) +             /* whole seconds    */
               ((double)etstart.tv_usec) / 1000000.0; /* + microseconds   */
    return now_time;
}
#ifdef _TESTING_
/* Standalone smoke test, compiled only when _TESTING_ is defined:
 * prints the wall-clock time reported by wtime() and exits. */
int main(int argc, char **argv) {
double time;
time = wtime();
/* %10.4f: seconds since the Unix epoch, 4 fractional digits */
printf("time of day = %10.4f\n", time);
return 0;
}
#endif
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /* File: cuda_wtime.cu */
/* Description: a timer that reports the current wall time */
/* */
/* Author: K Sujith Bhatt */
/* Chirag K */
/* National Institute of Technology Karnataka */
/* ------------------------------------------------------------------------- */
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
double wtime(void)
{
double now_time;
struct timeval etstart;
struct timezone tzp;
if (gettimeofday(&etstart, &tzp) == -1)
perror("Error: calling gettimeofday() not successful.\n");
now_time = ((double)etstart.tv_sec) + /* in seconds */
((double)etstart.tv_usec) / 1000000.0; /* in microseconds */
return now_time;
}
#ifdef _TESTING_
int main(int argc, char **argv) {
double time;
time = wtime();
printf("time of day = %10.4f\n", time);
return 0;
}
#endif | .file "tmpxft_00176d6a_00000000-6_cuda_wtime.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "Error: calling gettimeofday() not successful.\n"
.text
.globl _Z5wtimev
.type _Z5wtimev, @function
_Z5wtimev:
.LFB2057:
.cfi_startproc
endbr64
subq $56, %rsp
.cfi_def_cfa_offset 64
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rsi
leaq 16(%rsp), %rdi
call gettimeofday@PLT
cmpl $-1, %eax
je .L7
.L4:
pxor %xmm0, %xmm0
cvtsi2sdq 24(%rsp), %xmm0
divsd .LC1(%rip), %xmm0
pxor %xmm1, %xmm1
cvtsi2sdq 16(%rsp), %xmm1
addsd %xmm1, %xmm0
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
leaq .LC0(%rip), %rdi
call perror@PLT
jmp .L4
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z5wtimev, .-_Z5wtimev
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC1:
.long 0
.long 1093567616
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /* File: cuda_wtime.cu */
/* Description: a timer that reports the current wall time */
/* */
/* Author: K Sujith Bhatt */
/* Chirag K */
/* National Institute of Technology Karnataka */
/* ------------------------------------------------------------------------- */
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
double wtime(void)
{
double now_time;
struct timeval etstart;
struct timezone tzp;
if (gettimeofday(&etstart, &tzp) == -1)
perror("Error: calling gettimeofday() not successful.\n");
now_time = ((double)etstart.tv_sec) + /* in seconds */
((double)etstart.tv_usec) / 1000000.0; /* in microseconds */
return now_time;
}
#ifdef _TESTING_
int main(int argc, char **argv) {
double time;
time = wtime();
printf("time of day = %10.4f\n", time);
return 0;
}
#endif | /* File: cuda_wtime.cu */
/* Description: a timer that reports the current wall time */
/* */
/* Author: K Sujith Bhatt */
/* Chirag K */
/* National Institute of Technology Karnataka */
/* ------------------------------------------------------------------------- */
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
double wtime(void)
{
double now_time;
struct timeval etstart;
struct timezone tzp;
if (gettimeofday(&etstart, &tzp) == -1)
perror("Error: calling gettimeofday() not successful.\n");
now_time = ((double)etstart.tv_sec) + /* in seconds */
((double)etstart.tv_usec) / 1000000.0; /* in microseconds */
return now_time;
}
#ifdef _TESTING_
int main(int argc, char **argv) {
double time;
time = wtime();
printf("time of day = %10.4f\n", time);
return 0;
}
#endif |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /* File: cuda_wtime.cu */
/* Description: a timer that reports the current wall time */
/* */
/* Author: K Sujith Bhatt */
/* Chirag K */
/* National Institute of Technology Karnataka */
/* ------------------------------------------------------------------------- */
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
double wtime(void)
{
double now_time;
struct timeval etstart;
struct timezone tzp;
if (gettimeofday(&etstart, &tzp) == -1)
perror("Error: calling gettimeofday() not successful.\n");
now_time = ((double)etstart.tv_sec) + /* in seconds */
((double)etstart.tv_usec) / 1000000.0; /* in microseconds */
return now_time;
}
#ifdef _TESTING_
int main(int argc, char **argv) {
double time;
time = wtime();
printf("time of day = %10.4f\n", time);
return 0;
}
#endif | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /* File: cuda_wtime.cu */
/* Description: a timer that reports the current wall time */
/* */
/* Author: K Sujith Bhatt */
/* Chirag K */
/* National Institute of Technology Karnataka */
/* ------------------------------------------------------------------------- */
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
double wtime(void)
{
double now_time;
struct timeval etstart;
struct timezone tzp;
if (gettimeofday(&etstart, &tzp) == -1)
perror("Error: calling gettimeofday() not successful.\n");
now_time = ((double)etstart.tv_sec) + /* in seconds */
((double)etstart.tv_usec) / 1000000.0; /* in microseconds */
return now_time;
}
#ifdef _TESTING_
int main(int argc, char **argv) {
double time;
time = wtime();
printf("time of day = %10.4f\n", time);
return 0;
}
#endif | .text
.file "cuda_wtime.hip"
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z5wtimev
.LCPI0_0:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl _Z5wtimev
.p2align 4, 0x90
.type _Z5wtimev,@function
_Z5wtimev: # @_Z5wtimev
.cfi_startproc
# %bb.0:
subq $24, %rsp
.cfi_def_cfa_offset 32
movq %rsp, %rdi
leaq 16(%rsp), %rsi
callq gettimeofday
cmpl $-1, %eax
je .LBB0_1
.LBB0_2:
cvtsi2sdq (%rsp), %xmm1
cvtsi2sdq 8(%rsp), %xmm0
divsd .LCPI0_0(%rip), %xmm0
addsd %xmm1, %xmm0
addq $24, %rsp
.cfi_def_cfa_offset 8
retq
.LBB0_1:
.cfi_def_cfa_offset 32
movl $.L.str, %edi
callq perror
jmp .LBB0_2
.Lfunc_end0:
.size _Z5wtimev, .Lfunc_end0-_Z5wtimev
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Error: calling gettimeofday() not successful.\n"
.size .L.str, 47
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00176d6a_00000000-6_cuda_wtime.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "Error: calling gettimeofday() not successful.\n"
.text
.globl _Z5wtimev
.type _Z5wtimev, @function
_Z5wtimev:
.LFB2057:
.cfi_startproc
endbr64
subq $56, %rsp
.cfi_def_cfa_offset 64
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rsi
leaq 16(%rsp), %rdi
call gettimeofday@PLT
cmpl $-1, %eax
je .L7
.L4:
pxor %xmm0, %xmm0
cvtsi2sdq 24(%rsp), %xmm0
divsd .LC1(%rip), %xmm0
pxor %xmm1, %xmm1
cvtsi2sdq 16(%rsp), %xmm1
addsd %xmm1, %xmm0
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
leaq .LC0(%rip), %rdi
call perror@PLT
jmp .L4
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z5wtimev, .-_Z5wtimev
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC1:
.long 0
.long 1093567616
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "cuda_wtime.hip"
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z5wtimev
.LCPI0_0:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl _Z5wtimev
.p2align 4, 0x90
.type _Z5wtimev,@function
_Z5wtimev: # @_Z5wtimev
.cfi_startproc
# %bb.0:
subq $24, %rsp
.cfi_def_cfa_offset 32
movq %rsp, %rdi
leaq 16(%rsp), %rsi
callq gettimeofday
cmpl $-1, %eax
je .LBB0_1
.LBB0_2:
cvtsi2sdq (%rsp), %xmm1
cvtsi2sdq 8(%rsp), %xmm0
divsd .LCPI0_0(%rip), %xmm0
addsd %xmm1, %xmm0
addq $24, %rsp
.cfi_def_cfa_offset 8
retq
.LBB0_1:
.cfi_def_cfa_offset 32
movl $.L.str, %edi
callq perror
jmp .LBB0_2
.Lfunc_end0:
.size _Z5wtimev, .Lfunc_end0-_Z5wtimev
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Error: calling gettimeofday() not successful.\n"
.size .L.str, 47
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #define BLOCK_SIZE 512
#define SECTION_SIZE 1024 // define section size (size of subarray to be handled) to be twice the block size
__global__ void work_efficient_inclusive_scan(float *X, float *Y, unsigned in_size) {
__shared__ float XY[SECTION_SIZE];
// Load elements from input into in-place array
unsigned int t = threadIdx.x;
unsigned int start = 2 * blockIdx.x * blockDim.x;
// Each thread loads 2 elements, since section size is double block size
if(t + start < in_size) {
if(t == 0 && blockIdx.x == 0) {
XY[t] = 0;
} else {
XY[t] = X[start + t - 1];
}
}
if(t + start + BLOCK_SIZE < in_size) {
XY[t + BLOCK_SIZE] = X[start + t + BLOCK_SIZE - 1];
}
// Reduction tree step (increase stride size)
for(unsigned int stride = 1; stride <= BLOCK_SIZE; stride *=2) {
__syncthreads();
unsigned int i = (threadIdx.x+1) * 2 * stride - 1;
if(i < SECTION_SIZE) {
XY[i] += XY[i-stride];
}
}
// Distribution tree step (decrease stride size)
for(unsigned int stride = SECTION_SIZE/4; stride>0; stride/=2) {
__syncthreads();
unsigned int i = (threadIdx.x+1) * 2 * stride - 1;
if(i + stride < SECTION_SIZE) {
XY[i + stride] += XY[i];
}
}
__syncthreads();
// Cp threads to output array
t = threadIdx.x;
if(t + start < in_size) {
Y[start + t] = XY[t];
}
if(t + start < in_size && t + BLOCK_SIZE < in_size) {
Y[start + t + BLOCK_SIZE] = XY[t + BLOCK_SIZE];
}
}
/*
* Note: this kernel is based off the assumption that the GRID_DIM is 1024, or exactly
* twice the BLOCK_DIM. This way, one section/2 blocks/1 thread block in this prefix-scan stage will be able to
* exactly handle all the block outputs from the previous prefix-scan stage.
*/
__global__ void work_efficient_inclusive_scan_2(float *X, float *Y, unsigned in_size) {
__shared__ float XY[SECTION_SIZE];
unsigned int t = threadIdx.x;
// Each thread loads 2 elements, each element being the last element of every SECTION from last kernel
if(SECTION_SIZE * (t+1) - 1 < in_size) {
XY[t] = X[SECTION_SIZE * (t+1) - 1];
}
if(SECTION_SIZE * (t+BLOCK_SIZE+1) - 1 < in_size) {
XY[t+BLOCK_SIZE] = X[SECTION_SIZE * (t+BLOCK_SIZE+1) - 1];
}
// Reduction tree step (increase stride size)
for(unsigned int stride = 1; stride <= BLOCK_SIZE; stride *=2) {
__syncthreads();
unsigned int i = (threadIdx.x+1) * 2 * stride - 1;
if(i < SECTION_SIZE) {
XY[i] += XY[i-stride];
}
}
// Distribution tree step (decrease stride size)
for(unsigned int stride = SECTION_SIZE/4; stride>0; stride/=2) {
__syncthreads();
unsigned int i = (threadIdx.x+1) * 2 * stride - 1;
if(i + stride < SECTION_SIZE) {
XY[i + stride] += XY[i];
}
}
__syncthreads();
// Cp threads to output array
t = threadIdx.x;
if(t < in_size) {
Y[t] = XY[t];
}
if(t+BLOCK_SIZE < in_size) {
Y[t+BLOCK_SIZE] = XY[t+BLOCK_SIZE];
}
}
__global__ void work_efficient_inclusive_scan_3(float *X2, float *X, float *Y, unsigned in_size) {
unsigned int t = threadIdx.x;
// Cp threads to output array (each thread copies 2 elements and add result from prev kernel
unsigned int start = 2 * blockIdx.x * blockDim.x;
if(start != 0) { // Do for blocks 1 onwards
if(start + t < in_size) {
Y[start + t] = X2[start + t] + X[blockIdx.x - 1];
}
if(start + t + BLOCK_SIZE < in_size) {
Y[start + t + BLOCK_SIZE] = X2[start + t + BLOCK_SIZE] + X[blockIdx.x - 1];
}
} else {
if(start + t < in_size) {
Y[start + t] = X2[start + t];
}
if(start + t + BLOCK_SIZE) {
Y[start + t + BLOCK_SIZE] = X2[start + t + BLOCK_SIZE];
}
}
}
void preScan(float *out2, float *in, unsigned in_size) {
// INSERT CODE HERE
dim3 DimGrid((in_size-1)/(BLOCK_SIZE*2)+1, 1, 1);
dim3 DimBlock(BLOCK_SIZE, 1, 1);
work_efficient_inclusive_scan<<<DimGrid, DimBlock>>>(in, out2, in_size);
}
void preScan2(float *out3, float *out2, unsigned in_size) {
// INSERT CODE HERE
dim3 DimGrid(1, 1, 1);
dim3 DimBlock(BLOCK_SIZE, 1, 1);
work_efficient_inclusive_scan_2<<<DimGrid, DimBlock>>>(out2, out3, in_size);
}
void preScan3(float *out, float *out3, float *out2, unsigned in_size) {
// INSERT CODE HERE
dim3 DimGrid((in_size-1)/(BLOCK_SIZE*2)+1, 1, 1);
dim3 DimBlock(BLOCK_SIZE, 1, 1);
work_efficient_inclusive_scan_3<<<DimGrid, DimBlock>>>(out2, out3, out, in_size);
} | .file "tmpxft_000f6e0e_00000000-6_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2032:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2032:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z52__device_stub__Z29work_efficient_inclusive_scanPfS_jPfS_j
.type _Z52__device_stub__Z29work_efficient_inclusive_scanPfS_jPfS_j, @function
_Z52__device_stub__Z29work_efficient_inclusive_scanPfS_jPfS_j:
.LFB2054:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z29work_efficient_inclusive_scanPfS_j(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2054:
.size _Z52__device_stub__Z29work_efficient_inclusive_scanPfS_jPfS_j, .-_Z52__device_stub__Z29work_efficient_inclusive_scanPfS_jPfS_j
.globl _Z29work_efficient_inclusive_scanPfS_j
.type _Z29work_efficient_inclusive_scanPfS_j, @function
_Z29work_efficient_inclusive_scanPfS_j:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z52__device_stub__Z29work_efficient_inclusive_scanPfS_jPfS_j
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size _Z29work_efficient_inclusive_scanPfS_j, .-_Z29work_efficient_inclusive_scanPfS_j
.globl _Z7preScanPfS_j
.type _Z7preScanPfS_j, @function
_Z7preScanPfS_j:
.LFB2027:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $32, %rsp
.cfi_def_cfa_offset 64
movq %rdi, %r12
movq %rsi, %rbp
movl %edx, %ebx
leal -1(%rdx), %eax
shrl $10, %eax
addl $1, %eax
movl %eax, 8(%rsp)
movl $1, 12(%rsp)
movl $512, 20(%rsp)
movl $1, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L14
.L11:
addq $32, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L14:
.cfi_restore_state
movl %ebx, %edx
movq %r12, %rsi
movq %rbp, %rdi
call _Z52__device_stub__Z29work_efficient_inclusive_scanPfS_jPfS_j
jmp .L11
.cfi_endproc
.LFE2027:
.size _Z7preScanPfS_j, .-_Z7preScanPfS_j
.globl _Z54__device_stub__Z31work_efficient_inclusive_scan_2PfS_jPfS_j
.type _Z54__device_stub__Z31work_efficient_inclusive_scan_2PfS_jPfS_j, @function
_Z54__device_stub__Z31work_efficient_inclusive_scan_2PfS_jPfS_j:
.LFB2056:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L19
.L15:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L20
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z31work_efficient_inclusive_scan_2PfS_j(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L15
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2056:
.size _Z54__device_stub__Z31work_efficient_inclusive_scan_2PfS_jPfS_j, .-_Z54__device_stub__Z31work_efficient_inclusive_scan_2PfS_jPfS_j
.globl _Z31work_efficient_inclusive_scan_2PfS_j
.type _Z31work_efficient_inclusive_scan_2PfS_j, @function
_Z31work_efficient_inclusive_scan_2PfS_j:
.LFB2057:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z54__device_stub__Z31work_efficient_inclusive_scan_2PfS_jPfS_j
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size _Z31work_efficient_inclusive_scan_2PfS_j, .-_Z31work_efficient_inclusive_scan_2PfS_j
.globl _Z8preScan2PfS_j
.type _Z8preScan2PfS_j, @function
_Z8preScan2PfS_j:
.LFB2028:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $32, %rsp
.cfi_def_cfa_offset 64
movq %rdi, %rbp
movq %rsi, %rbx
movl %edx, %r12d
movl $1, 8(%rsp)
movl $1, 12(%rsp)
movl $512, 20(%rsp)
movl $1, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L26
.L23:
addq $32, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L26:
.cfi_restore_state
movl %r12d, %edx
movq %rbp, %rsi
movq %rbx, %rdi
call _Z54__device_stub__Z31work_efficient_inclusive_scan_2PfS_jPfS_j
jmp .L23
.cfi_endproc
.LFE2028:
.size _Z8preScan2PfS_j, .-_Z8preScan2PfS_j
.globl _Z56__device_stub__Z31work_efficient_inclusive_scan_3PfS_S_jPfS_S_j
.type _Z56__device_stub__Z31work_efficient_inclusive_scan_3PfS_S_jPfS_S_j, @function
_Z56__device_stub__Z31work_efficient_inclusive_scan_3PfS_S_jPfS_S_j:
.LFB2058:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L31
.L27:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L32
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z31work_efficient_inclusive_scan_3PfS_S_j(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L27
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size _Z56__device_stub__Z31work_efficient_inclusive_scan_3PfS_S_jPfS_S_j, .-_Z56__device_stub__Z31work_efficient_inclusive_scan_3PfS_S_jPfS_S_j
.globl _Z31work_efficient_inclusive_scan_3PfS_S_j
.type _Z31work_efficient_inclusive_scan_3PfS_S_j, @function
_Z31work_efficient_inclusive_scan_3PfS_S_j:
.LFB2059:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z56__device_stub__Z31work_efficient_inclusive_scan_3PfS_S_jPfS_S_j
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _Z31work_efficient_inclusive_scan_3PfS_S_j, .-_Z31work_efficient_inclusive_scan_3PfS_S_j
.globl _Z8preScan3PfS_S_j
.type _Z8preScan3PfS_S_j, @function
_Z8preScan3PfS_S_j:
.LFB2029:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $40, %rsp
.cfi_def_cfa_offset 80
movq %rdi, %r13
movq %rsi, %r12
movq %rdx, %rbp
movl %ecx, %ebx
leal -1(%rcx), %eax
shrl $10, %eax
addl $1, %eax
movl %eax, 8(%rsp)
movl $1, 12(%rsp)
movl $512, 20(%rsp)
movl $1, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L38
.L35:
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L38:
.cfi_restore_state
movl %ebx, %ecx
movq %r13, %rdx
movq %r12, %rsi
movq %rbp, %rdi
call _Z56__device_stub__Z31work_efficient_inclusive_scan_3PfS_S_jPfS_S_j
jmp .L35
.cfi_endproc
.LFE2029:
.size _Z8preScan3PfS_S_j, .-_Z8preScan3PfS_S_j
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z31work_efficient_inclusive_scan_3PfS_S_j"
.align 8
.LC1:
.string "_Z31work_efficient_inclusive_scan_2PfS_j"
.align 8
.LC2:
.string "_Z29work_efficient_inclusive_scanPfS_j"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2061:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z31work_efficient_inclusive_scan_3PfS_S_j(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z31work_efficient_inclusive_scan_2PfS_j(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z29work_efficient_inclusive_scanPfS_j(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #define BLOCK_SIZE 512
#define SECTION_SIZE 1024 // define section size (size of subarray to be handled) to be twice the block size
__global__ void work_efficient_inclusive_scan(float *X, float *Y, unsigned in_size) {
__shared__ float XY[SECTION_SIZE];
// Load elements from input into in-place array
unsigned int t = threadIdx.x;
unsigned int start = 2 * blockIdx.x * blockDim.x;
// Each thread loads 2 elements, since section size is double block size
if(t + start < in_size) {
if(t == 0 && blockIdx.x == 0) {
XY[t] = 0;
} else {
XY[t] = X[start + t - 1];
}
}
if(t + start + BLOCK_SIZE < in_size) {
XY[t + BLOCK_SIZE] = X[start + t + BLOCK_SIZE - 1];
}
// Reduction tree step (increase stride size)
for(unsigned int stride = 1; stride <= BLOCK_SIZE; stride *=2) {
__syncthreads();
unsigned int i = (threadIdx.x+1) * 2 * stride - 1;
if(i < SECTION_SIZE) {
XY[i] += XY[i-stride];
}
}
// Distribution tree step (decrease stride size)
for(unsigned int stride = SECTION_SIZE/4; stride>0; stride/=2) {
__syncthreads();
unsigned int i = (threadIdx.x+1) * 2 * stride - 1;
if(i + stride < SECTION_SIZE) {
XY[i + stride] += XY[i];
}
}
__syncthreads();
// Cp threads to output array
t = threadIdx.x;
if(t + start < in_size) {
Y[start + t] = XY[t];
}
if(t + start < in_size && t + BLOCK_SIZE < in_size) {
Y[start + t + BLOCK_SIZE] = XY[t + BLOCK_SIZE];
}
}
/*
* Note: this kernel is based off the assumption that the GRID_DIM is 1024, or exactly
* twice the BLOCK_DIM. This way, one section/2 blocks/1 thread block in this prefix-scan stage will be able to
* exactly handle all the block outputs from the previous prefix-scan stage.
*/
__global__ void work_efficient_inclusive_scan_2(float *X, float *Y, unsigned in_size) {
__shared__ float XY[SECTION_SIZE];
unsigned int t = threadIdx.x;
// Each thread loads 2 elements, each element being the last element of every SECTION from last kernel
if(SECTION_SIZE * (t+1) - 1 < in_size) {
XY[t] = X[SECTION_SIZE * (t+1) - 1];
}
if(SECTION_SIZE * (t+BLOCK_SIZE+1) - 1 < in_size) {
XY[t+BLOCK_SIZE] = X[SECTION_SIZE * (t+BLOCK_SIZE+1) - 1];
}
// Reduction tree step (increase stride size)
for(unsigned int stride = 1; stride <= BLOCK_SIZE; stride *=2) {
__syncthreads();
unsigned int i = (threadIdx.x+1) * 2 * stride - 1;
if(i < SECTION_SIZE) {
XY[i] += XY[i-stride];
}
}
// Distribution tree step (decrease stride size)
for(unsigned int stride = SECTION_SIZE/4; stride>0; stride/=2) {
__syncthreads();
unsigned int i = (threadIdx.x+1) * 2 * stride - 1;
if(i + stride < SECTION_SIZE) {
XY[i + stride] += XY[i];
}
}
__syncthreads();
// Cp threads to output array
t = threadIdx.x;
if(t < in_size) {
Y[t] = XY[t];
}
if(t+BLOCK_SIZE < in_size) {
Y[t+BLOCK_SIZE] = XY[t+BLOCK_SIZE];
}
}
__global__ void work_efficient_inclusive_scan_3(float *X2, float *X, float *Y, unsigned in_size) {
unsigned int t = threadIdx.x;
// Cp threads to output array (each thread copies 2 elements and add result from prev kernel
unsigned int start = 2 * blockIdx.x * blockDim.x;
if(start != 0) { // Do for blocks 1 onwards
if(start + t < in_size) {
Y[start + t] = X2[start + t] + X[blockIdx.x - 1];
}
if(start + t + BLOCK_SIZE < in_size) {
Y[start + t + BLOCK_SIZE] = X2[start + t + BLOCK_SIZE] + X[blockIdx.x - 1];
}
} else {
if(start + t < in_size) {
Y[start + t] = X2[start + t];
}
if(start + t + BLOCK_SIZE) {
Y[start + t + BLOCK_SIZE] = X2[start + t + BLOCK_SIZE];
}
}
}
void preScan(float *out2, float *in, unsigned in_size) {
// INSERT CODE HERE
dim3 DimGrid((in_size-1)/(BLOCK_SIZE*2)+1, 1, 1);
dim3 DimBlock(BLOCK_SIZE, 1, 1);
work_efficient_inclusive_scan<<<DimGrid, DimBlock>>>(in, out2, in_size);
}
void preScan2(float *out3, float *out2, unsigned in_size) {
// INSERT CODE HERE
dim3 DimGrid(1, 1, 1);
dim3 DimBlock(BLOCK_SIZE, 1, 1);
work_efficient_inclusive_scan_2<<<DimGrid, DimBlock>>>(out2, out3, in_size);
}
void preScan3(float *out, float *out3, float *out2, unsigned in_size) {
// INSERT CODE HERE
dim3 DimGrid((in_size-1)/(BLOCK_SIZE*2)+1, 1, 1);
dim3 DimBlock(BLOCK_SIZE, 1, 1);
work_efficient_inclusive_scan_3<<<DimGrid, DimBlock>>>(out2, out3, out, in_size);
} | #include <hip/hip_runtime.h>
#define BLOCK_SIZE 512
#define SECTION_SIZE 1024 // define section size (size of subarray to be handled) to be twice the block size
__global__ void work_efficient_inclusive_scan(float *X, float *Y, unsigned in_size) {
__shared__ float XY[SECTION_SIZE];
// Load elements from input into in-place array
unsigned int t = threadIdx.x;
unsigned int start = 2 * blockIdx.x * blockDim.x;
// Each thread loads 2 elements, since section size is double block size
if(t + start < in_size) {
if(t == 0 && blockIdx.x == 0) {
XY[t] = 0;
} else {
XY[t] = X[start + t - 1];
}
}
if(t + start + BLOCK_SIZE < in_size) {
XY[t + BLOCK_SIZE] = X[start + t + BLOCK_SIZE - 1];
}
// Reduction tree step (increase stride size)
for(unsigned int stride = 1; stride <= BLOCK_SIZE; stride *=2) {
__syncthreads();
unsigned int i = (threadIdx.x+1) * 2 * stride - 1;
if(i < SECTION_SIZE) {
XY[i] += XY[i-stride];
}
}
// Distribution tree step (decrease stride size)
for(unsigned int stride = SECTION_SIZE/4; stride>0; stride/=2) {
__syncthreads();
unsigned int i = (threadIdx.x+1) * 2 * stride - 1;
if(i + stride < SECTION_SIZE) {
XY[i + stride] += XY[i];
}
}
__syncthreads();
// Cp threads to output array
t = threadIdx.x;
if(t + start < in_size) {
Y[start + t] = XY[t];
}
if(t + start < in_size && t + BLOCK_SIZE < in_size) {
Y[start + t + BLOCK_SIZE] = XY[t + BLOCK_SIZE];
}
}
/*
* Note: this kernel is based off the assumption that the GRID_DIM is 1024, or exactly
* twice the BLOCK_DIM. This way, one section/2 blocks/1 thread block in this prefix-scan stage will be able to
* exactly handle all the block outputs from the previous prefix-scan stage.
*/
__global__ void work_efficient_inclusive_scan_2(float *X, float *Y, unsigned in_size) {
__shared__ float XY[SECTION_SIZE];
unsigned int t = threadIdx.x;
// Each thread loads 2 elements, each element being the last element of every SECTION from last kernel
if(SECTION_SIZE * (t+1) - 1 < in_size) {
XY[t] = X[SECTION_SIZE * (t+1) - 1];
}
if(SECTION_SIZE * (t+BLOCK_SIZE+1) - 1 < in_size) {
XY[t+BLOCK_SIZE] = X[SECTION_SIZE * (t+BLOCK_SIZE+1) - 1];
}
// Reduction tree step (increase stride size)
for(unsigned int stride = 1; stride <= BLOCK_SIZE; stride *=2) {
__syncthreads();
unsigned int i = (threadIdx.x+1) * 2 * stride - 1;
if(i < SECTION_SIZE) {
XY[i] += XY[i-stride];
}
}
// Distribution tree step (decrease stride size)
for(unsigned int stride = SECTION_SIZE/4; stride>0; stride/=2) {
__syncthreads();
unsigned int i = (threadIdx.x+1) * 2 * stride - 1;
if(i + stride < SECTION_SIZE) {
XY[i + stride] += XY[i];
}
}
__syncthreads();
// Cp threads to output array
t = threadIdx.x;
if(t < in_size) {
Y[t] = XY[t];
}
if(t+BLOCK_SIZE < in_size) {
Y[t+BLOCK_SIZE] = XY[t+BLOCK_SIZE];
}
}
__global__ void work_efficient_inclusive_scan_3(float *X2, float *X, float *Y, unsigned in_size) {
unsigned int t = threadIdx.x;
// Cp threads to output array (each thread copies 2 elements and add result from prev kernel
unsigned int start = 2 * blockIdx.x * blockDim.x;
if(start != 0) { // Do for blocks 1 onwards
if(start + t < in_size) {
Y[start + t] = X2[start + t] + X[blockIdx.x - 1];
}
if(start + t + BLOCK_SIZE < in_size) {
Y[start + t + BLOCK_SIZE] = X2[start + t + BLOCK_SIZE] + X[blockIdx.x - 1];
}
} else {
if(start + t < in_size) {
Y[start + t] = X2[start + t];
}
if(start + t + BLOCK_SIZE) {
Y[start + t + BLOCK_SIZE] = X2[start + t + BLOCK_SIZE];
}
}
}
void preScan(float *out2, float *in, unsigned in_size) {
// INSERT CODE HERE
dim3 DimGrid((in_size-1)/(BLOCK_SIZE*2)+1, 1, 1);
dim3 DimBlock(BLOCK_SIZE, 1, 1);
work_efficient_inclusive_scan<<<DimGrid, DimBlock>>>(in, out2, in_size);
}
void preScan2(float *out3, float *out2, unsigned in_size) {
// INSERT CODE HERE
dim3 DimGrid(1, 1, 1);
dim3 DimBlock(BLOCK_SIZE, 1, 1);
work_efficient_inclusive_scan_2<<<DimGrid, DimBlock>>>(out2, out3, in_size);
}
void preScan3(float *out, float *out3, float *out2, unsigned in_size) {
// INSERT CODE HERE
dim3 DimGrid((in_size-1)/(BLOCK_SIZE*2)+1, 1, 1);
dim3 DimBlock(BLOCK_SIZE, 1, 1);
work_efficient_inclusive_scan_3<<<DimGrid, DimBlock>>>(out2, out3, out, in_size);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#define BLOCK_SIZE 512
#define SECTION_SIZE 1024 // define section size (size of subarray to be handled) to be twice the block size
__global__ void work_efficient_inclusive_scan(float *X, float *Y, unsigned in_size) {
__shared__ float XY[SECTION_SIZE];
// Load elements from input into in-place array
unsigned int t = threadIdx.x;
unsigned int start = 2 * blockIdx.x * blockDim.x;
// Each thread loads 2 elements, since section size is double block size
if(t + start < in_size) {
if(t == 0 && blockIdx.x == 0) {
XY[t] = 0;
} else {
XY[t] = X[start + t - 1];
}
}
if(t + start + BLOCK_SIZE < in_size) {
XY[t + BLOCK_SIZE] = X[start + t + BLOCK_SIZE - 1];
}
// Reduction tree step (increase stride size)
for(unsigned int stride = 1; stride <= BLOCK_SIZE; stride *=2) {
__syncthreads();
unsigned int i = (threadIdx.x+1) * 2 * stride - 1;
if(i < SECTION_SIZE) {
XY[i] += XY[i-stride];
}
}
// Distribution tree step (decrease stride size)
for(unsigned int stride = SECTION_SIZE/4; stride>0; stride/=2) {
__syncthreads();
unsigned int i = (threadIdx.x+1) * 2 * stride - 1;
if(i + stride < SECTION_SIZE) {
XY[i + stride] += XY[i];
}
}
__syncthreads();
// Cp threads to output array
t = threadIdx.x;
if(t + start < in_size) {
Y[start + t] = XY[t];
}
if(t + start < in_size && t + BLOCK_SIZE < in_size) {
Y[start + t + BLOCK_SIZE] = XY[t + BLOCK_SIZE];
}
}
/*
* Note: this kernel is based off the assumption that the GRID_DIM is 1024, or exactly
* twice the BLOCK_DIM. This way, one section/2 blocks/1 thread block in this prefix-scan stage will be able to
* exactly handle all the block outputs from the previous prefix-scan stage.
*/
__global__ void work_efficient_inclusive_scan_2(float *X, float *Y, unsigned in_size) {
__shared__ float XY[SECTION_SIZE];
unsigned int t = threadIdx.x;
// Each thread loads 2 elements, each element being the last element of every SECTION from last kernel
if(SECTION_SIZE * (t+1) - 1 < in_size) {
XY[t] = X[SECTION_SIZE * (t+1) - 1];
}
if(SECTION_SIZE * (t+BLOCK_SIZE+1) - 1 < in_size) {
XY[t+BLOCK_SIZE] = X[SECTION_SIZE * (t+BLOCK_SIZE+1) - 1];
}
// Reduction tree step (increase stride size)
for(unsigned int stride = 1; stride <= BLOCK_SIZE; stride *=2) {
__syncthreads();
unsigned int i = (threadIdx.x+1) * 2 * stride - 1;
if(i < SECTION_SIZE) {
XY[i] += XY[i-stride];
}
}
// Distribution tree step (decrease stride size)
for(unsigned int stride = SECTION_SIZE/4; stride>0; stride/=2) {
__syncthreads();
unsigned int i = (threadIdx.x+1) * 2 * stride - 1;
if(i + stride < SECTION_SIZE) {
XY[i + stride] += XY[i];
}
}
__syncthreads();
// Cp threads to output array
t = threadIdx.x;
if(t < in_size) {
Y[t] = XY[t];
}
if(t+BLOCK_SIZE < in_size) {
Y[t+BLOCK_SIZE] = XY[t+BLOCK_SIZE];
}
}
__global__ void work_efficient_inclusive_scan_3(float *X2, float *X, float *Y, unsigned in_size) {
unsigned int t = threadIdx.x;
// Cp threads to output array (each thread copies 2 elements and add result from prev kernel
unsigned int start = 2 * blockIdx.x * blockDim.x;
if(start != 0) { // Do for blocks 1 onwards
if(start + t < in_size) {
Y[start + t] = X2[start + t] + X[blockIdx.x - 1];
}
if(start + t + BLOCK_SIZE < in_size) {
Y[start + t + BLOCK_SIZE] = X2[start + t + BLOCK_SIZE] + X[blockIdx.x - 1];
}
} else {
if(start + t < in_size) {
Y[start + t] = X2[start + t];
}
if(start + t + BLOCK_SIZE) {
Y[start + t + BLOCK_SIZE] = X2[start + t + BLOCK_SIZE];
}
}
}
void preScan(float *out2, float *in, unsigned in_size) {
// INSERT CODE HERE
dim3 DimGrid((in_size-1)/(BLOCK_SIZE*2)+1, 1, 1);
dim3 DimBlock(BLOCK_SIZE, 1, 1);
work_efficient_inclusive_scan<<<DimGrid, DimBlock>>>(in, out2, in_size);
}
void preScan2(float *out3, float *out2, unsigned in_size) {
// INSERT CODE HERE
dim3 DimGrid(1, 1, 1);
dim3 DimBlock(BLOCK_SIZE, 1, 1);
work_efficient_inclusive_scan_2<<<DimGrid, DimBlock>>>(out2, out3, in_size);
}
void preScan3(float *out, float *out3, float *out2, unsigned in_size) {
// INSERT CODE HERE
dim3 DimGrid((in_size-1)/(BLOCK_SIZE*2)+1, 1, 1);
dim3 DimBlock(BLOCK_SIZE, 1, 1);
work_efficient_inclusive_scan_3<<<DimGrid, DimBlock>>>(out2, out3, out, in_size);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z29work_efficient_inclusive_scanPfS_j
.globl _Z29work_efficient_inclusive_scanPfS_j
.p2align 8
.type _Z29work_efficient_inclusive_scanPfS_j,@function
_Z29work_efficient_inclusive_scanPfS_j:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x10
s_load_b64 s[4:5], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_mul_i32 s2, s15, s2
v_lshl_add_u32 v1, s2, 1, v0
s_delay_alu instid0(VALU_DEP_1)
v_cmp_gt_u32_e32 vcc_lo, s3, v1
s_and_saveexec_b32 s6, vcc_lo
s_cbranch_execz .LBB0_5
v_or_b32_e32 v2, s15, v0
s_mov_b32 s7, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_ne_u32_e32 0, v2
s_xor_b32 s7, exec_lo, s7
s_cbranch_execz .LBB0_3
v_dual_mov_b32 v3, 0 :: v_dual_add_nc_u32 v2, -1, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[2:3]
v_add_co_u32 v2, s2, s4, v2
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v3, s2, s5, v3, s2
global_load_b32 v2, v[2:3], off
v_lshlrev_b32_e32 v3, 2, v0
s_waitcnt vmcnt(0)
ds_store_b32 v3, v2
.LBB0_3:
s_and_not1_saveexec_b32 s2, s7
s_cbranch_execz .LBB0_5
v_mov_b32_e32 v2, 0
ds_store_b32 v2, v2
.LBB0_5:
s_or_b32 exec_lo, exec_lo, s6
v_add_nc_u32_e32 v3, 0x200, v1
s_mov_b32 s6, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_u32_e64 s3, v3
s_cbranch_execz .LBB0_7
v_dual_mov_b32 v5, 0 :: v_dual_add_nc_u32 v4, 0x1ff, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[4:5], 2, v[4:5]
v_add_co_u32 v4, s2, s4, v4
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v5, s2, s5, v5, s2
global_load_b32 v2, v[4:5], off
v_lshlrev_b32_e32 v4, 2, v0
s_waitcnt vmcnt(0)
ds_store_b32 v4, v2 offset:2048
.LBB0_7:
s_or_b32 exec_lo, exec_lo, s6
v_lshl_add_u32 v2, v0, 1, 2
s_mov_b32 s4, 1
s_branch .LBB0_9
.p2align 6
.LBB0_8:
s_or_b32 exec_lo, exec_lo, s5
s_lshl_b32 s2, s4, 1
s_cmpk_gt_u32 s4, 0x100
s_mov_b32 s4, s2
s_cbranch_scc1 .LBB0_11
.LBB0_9:
s_delay_alu instid0(VALU_DEP_1)
v_mul_lo_u32 v4, s4, v2
s_mov_b32 s5, exec_lo
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
v_cmpx_gt_u32_e32 0x401, v4
s_cbranch_execz .LBB0_8
v_add_nc_u32_e32 v4, -1, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_subrev_nc_u32_e32 v5, s4, v4
v_lshlrev_b32_e32 v4, 2, v4
v_lshlrev_b32_e32 v5, 2, v5
ds_load_b32 v5, v5
ds_load_b32 v6, v4
s_waitcnt lgkmcnt(0)
v_add_f32_e32 v5, v5, v6
ds_store_b32 v4, v5
s_branch .LBB0_8
.LBB0_11:
v_lshl_add_u32 v2, v0, 1, 2
s_movk_i32 s4, 0x100
s_branch .LBB0_13
.p2align 6
.LBB0_12:
s_or_b32 exec_lo, exec_lo, s5
s_lshr_b32 s2, s4, 1
s_cmp_lt_u32 s4, 2
s_mov_b32 s4, s2
s_cbranch_scc1 .LBB0_15
.LBB0_13:
s_delay_alu instid0(VALU_DEP_1)
v_mad_u32_u24 v5, s4, v2, -1
s_mov_b32 s5, exec_lo
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
v_add_nc_u32_e32 v4, s4, v5
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_u32_e32 0x400, v4
s_cbranch_execz .LBB0_12
v_lshlrev_b32_e32 v5, 2, v5
v_lshlrev_b32_e32 v4, 2, v4
ds_load_b32 v5, v5
ds_load_b32 v6, v4
s_waitcnt lgkmcnt(0)
v_add_f32_e32 v5, v5, v6
ds_store_b32 v4, v5
s_branch .LBB0_12
.LBB0_15:
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB0_18
s_load_b64 s[0:1], s[0:1], 0x8
v_lshlrev_b32_e32 v2, 2, v0
v_add_nc_u32_e32 v0, 0x200, v0
ds_load_b32 v6, v2
v_mov_b32_e32 v2, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[4:5], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v4, vcc_lo, s0, v4
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v5, vcc_lo
v_cmp_gt_u32_e32 vcc_lo, s3, v0
global_store_b32 v[4:5], v6, off
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_18
v_lshlrev_b32_e32 v0, 2, v0
v_mov_b32_e32 v4, v2
ds_load_b32 v5, v0
v_lshlrev_b64 v[0:1], 2, v[3:4]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt lgkmcnt(0)
global_store_b32 v[0:1], v5, off
.LBB0_18:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z29work_efficient_inclusive_scanPfS_j
.amdhsa_group_segment_fixed_size 4096
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z29work_efficient_inclusive_scanPfS_j, .Lfunc_end0-_Z29work_efficient_inclusive_scanPfS_j
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z31work_efficient_inclusive_scan_2PfS_j
.globl _Z31work_efficient_inclusive_scan_2PfS_j
.p2align 8
.type _Z31work_efficient_inclusive_scan_2PfS_j,@function
_Z31work_efficient_inclusive_scan_2PfS_j:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x10
s_load_b64 s[2:3], s[0:1], 0x0
v_lshlrev_b32_e32 v1, 10, v0
s_mov_b32 s5, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_or_b32_e32 v2, 0x3ff, v1
s_waitcnt lgkmcnt(0)
v_cmpx_gt_u32_e64 s4, v2
s_cbranch_execz .LBB1_2
v_lshlrev_b32_e32 v2, 2, v2
v_lshlrev_b32_e32 v3, 2, v0
global_load_b32 v2, v2, s[2:3]
s_waitcnt vmcnt(0)
ds_store_b32 v3, v2
.LBB1_2:
s_or_b32 exec_lo, exec_lo, s5
v_add_nc_u32_e32 v1, 0x803ff, v1
s_mov_b32 s5, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_u32_e64 s4, v1
s_cbranch_execz .LBB1_4
v_lshlrev_b32_e32 v1, 2, v1
v_lshlrev_b32_e32 v2, 2, v0
global_load_b32 v1, v1, s[2:3]
s_waitcnt vmcnt(0)
ds_store_b32 v2, v1 offset:2048
.LBB1_4:
s_or_b32 exec_lo, exec_lo, s5
v_lshl_add_u32 v1, v0, 1, 2
s_mov_b32 s2, 1
s_branch .LBB1_6
.p2align 6
.LBB1_5:
s_or_b32 exec_lo, exec_lo, s3
s_lshl_b32 s3, s2, 1
s_cmpk_gt_u32 s2, 0x100
s_mov_b32 s2, s3
s_cbranch_scc1 .LBB1_8
.LBB1_6:
s_delay_alu instid0(VALU_DEP_1)
v_mul_lo_u32 v2, s2, v1
s_mov_b32 s3, exec_lo
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
v_cmpx_gt_u32_e32 0x401, v2
s_cbranch_execz .LBB1_5
v_add_nc_u32_e32 v2, -1, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_subrev_nc_u32_e32 v3, s2, v2
v_lshlrev_b32_e32 v2, 2, v2
v_lshlrev_b32_e32 v3, 2, v3
ds_load_b32 v3, v3
ds_load_b32 v4, v2
s_waitcnt lgkmcnt(0)
v_add_f32_e32 v3, v3, v4
ds_store_b32 v2, v3
s_branch .LBB1_5
.LBB1_8:
v_lshl_add_u32 v1, v0, 1, 2
s_movk_i32 s2, 0x100
s_branch .LBB1_10
.p2align 6
.LBB1_9:
s_or_b32 exec_lo, exec_lo, s3
s_lshr_b32 s3, s2, 1
s_cmp_lt_u32 s2, 2
s_mov_b32 s2, s3
s_cbranch_scc1 .LBB1_12
.LBB1_10:
s_delay_alu instid0(VALU_DEP_1)
v_mad_u32_u24 v3, s2, v1, -1
s_mov_b32 s3, exec_lo
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
v_add_nc_u32_e32 v2, s2, v3
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_u32_e32 0x400, v2
s_cbranch_execz .LBB1_9
v_lshlrev_b32_e32 v3, 2, v3
v_lshlrev_b32_e32 v2, 2, v2
ds_load_b32 v3, v3
ds_load_b32 v4, v2
s_waitcnt lgkmcnt(0)
v_add_f32_e32 v3, v3, v4
ds_store_b32 v2, v3
s_branch .LBB1_9
.LBB1_12:
s_load_b64 s[0:1], s[0:1], 0x8
s_mov_b32 s2, exec_lo
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
v_cmpx_gt_u32_e64 s4, v0
s_cbranch_execz .LBB1_14
v_lshlrev_b32_e32 v1, 2, v0
ds_load_b32 v2, v1
s_waitcnt lgkmcnt(0)
global_store_b32 v1, v2, s[0:1]
.LBB1_14:
s_or_b32 exec_lo, exec_lo, s2
v_add_nc_u32_e32 v0, 0x200, v0
s_mov_b32 s2, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_u32_e64 s4, v0
s_cbranch_execz .LBB1_16
v_lshlrev_b32_e32 v0, 2, v0
ds_load_b32 v1, v0
s_waitcnt lgkmcnt(0)
global_store_b32 v0, v1, s[0:1]
.LBB1_16:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z31work_efficient_inclusive_scan_2PfS_j
.amdhsa_group_segment_fixed_size 4096
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 20
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 6
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z31work_efficient_inclusive_scan_2PfS_j, .Lfunc_end1-_Z31work_efficient_inclusive_scan_2PfS_j
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z31work_efficient_inclusive_scan_3PfS_S_j
.globl _Z31work_efficient_inclusive_scan_3PfS_S_j
.p2align 8
.type _Z31work_efficient_inclusive_scan_3PfS_S_j,@function
_Z31work_efficient_inclusive_scan_3PfS_S_j:
s_clause 0x3
s_load_b32 s7, s[0:1], 0x2c
s_load_b64 s[4:5], s[0:1], 0x0
s_load_b64 s[2:3], s[0:1], 0x10
s_load_b32 s6, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s7, s7, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_mul_i32 s7, s15, s7
s_lshl_b32 s7, s7, 1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_eq_u32 s7, 0
s_cbranch_scc1 .LBB2_8
s_load_b64 s[0:1], s[0:1], 0x8
v_add_nc_u32_e32 v1, s7, v0
s_mov_b32 s7, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_u32_e64 s6, v1
s_cbranch_execz .LBB2_3
v_mov_b32_e32 v2, 0
s_mov_b32 s9, 0
s_add_i32 s8, s15, -1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_lshl_b64 s[8:9], s[8:9], 2
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_add_u32 s8, s0, s8
s_addc_u32 s9, s1, s9
s_load_b32 s8, s[8:9], 0x0
s_delay_alu instid0(VALU_DEP_1)
v_add_co_u32 v4, vcc_lo, s4, v2
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v3, vcc_lo
v_add_co_u32 v2, vcc_lo, s2, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo
global_load_b32 v4, v[4:5], off
s_waitcnt vmcnt(0) lgkmcnt(0)
v_add_f32_e32 v4, s8, v4
global_store_b32 v[2:3], v4, off
.LBB2_3:
s_or_b32 exec_lo, exec_lo, s7
v_add_nc_u32_e32 v1, 0x200, v1
s_mov_b32 s8, 0
s_mov_b32 s7, 0
s_mov_b32 s9, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_u32_e64 s6, v1
s_cbranch_execz .LBB2_5
v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v5, 0
s_mov_b32 s11, 0
s_add_i32 s10, s15, -1
s_mov_b32 s7, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[3:4], 2, v[1:2]
s_lshl_b64 s[10:11], s[10:11], 2
s_waitcnt lgkmcnt(0)
s_add_u32 s0, s0, s10
s_addc_u32 s1, s1, s11
v_add_co_u32 v3, vcc_lo, s4, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo
global_load_b32 v3, v[3:4], off
global_load_b32 v4, v5, s[0:1]
s_waitcnt vmcnt(0)
v_add_f32_e32 v3, v3, v4
.LBB2_5:
s_or_b32 exec_lo, exec_lo, s9
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 vcc_lo, exec_lo, s8
s_cbranch_vccnz .LBB2_9
.LBB2_6:
s_waitcnt lgkmcnt(0)
s_and_saveexec_b32 s0, s7
s_cbranch_execnz .LBB2_12
.LBB2_7:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.LBB2_8:
s_mov_b32 s7, 0
s_cbranch_execz .LBB2_6
.LBB2_9:
s_waitcnt lgkmcnt(0)
s_mov_b32 s0, exec_lo
v_cmpx_gt_u32_e64 s6, v0
s_cbranch_execz .LBB2_11
v_lshlrev_b32_e32 v1, 2, v0
global_load_b32 v2, v1, s[4:5]
s_waitcnt vmcnt(0)
global_store_b32 v1, v2, s[2:3]
.LBB2_11:
s_or_b32 exec_lo, exec_lo, s0
v_dual_mov_b32 v2, 0 :: v_dual_add_nc_u32 v1, 0x200, v0
s_mov_b32 s7, -1
s_delay_alu instid0(VALU_DEP_1)
v_lshlrev_b32_e32 v0, 2, v1
global_load_b32 v3, v0, s[4:5]
s_and_saveexec_b32 s0, s7
s_cbranch_execz .LBB2_7
.LBB2_12:
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
s_waitcnt vmcnt(0)
global_store_b32 v[0:1], v3, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z31work_efficient_inclusive_scan_3PfS_S_j
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z31work_efficient_inclusive_scan_3PfS_S_j, .Lfunc_end2-_Z31work_efficient_inclusive_scan_3PfS_S_j
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 4096
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z29work_efficient_inclusive_scanPfS_j
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z29work_efficient_inclusive_scanPfS_j.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 4096
.kernarg_segment_align: 8
.kernarg_segment_size: 20
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z31work_efficient_inclusive_scan_2PfS_j
.private_segment_fixed_size: 0
.sgpr_count: 6
.sgpr_spill_count: 0
.symbol: _Z31work_efficient_inclusive_scan_2PfS_j.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z31work_efficient_inclusive_scan_3PfS_S_j
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z31work_efficient_inclusive_scan_3PfS_S_j.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#define BLOCK_SIZE 512
#define SECTION_SIZE 1024 // define section size (size of subarray to be handled) to be twice the block size
__global__ void work_efficient_inclusive_scan(float *X, float *Y, unsigned in_size) {
__shared__ float XY[SECTION_SIZE];
// Load elements from input into in-place array
unsigned int t = threadIdx.x;
unsigned int start = 2 * blockIdx.x * blockDim.x;
// Each thread loads 2 elements, since section size is double block size
if(t + start < in_size) {
if(t == 0 && blockIdx.x == 0) {
XY[t] = 0;
} else {
XY[t] = X[start + t - 1];
}
}
if(t + start + BLOCK_SIZE < in_size) {
XY[t + BLOCK_SIZE] = X[start + t + BLOCK_SIZE - 1];
}
// Reduction tree step (increase stride size)
for(unsigned int stride = 1; stride <= BLOCK_SIZE; stride *=2) {
__syncthreads();
unsigned int i = (threadIdx.x+1) * 2 * stride - 1;
if(i < SECTION_SIZE) {
XY[i] += XY[i-stride];
}
}
// Distribution tree step (decrease stride size)
for(unsigned int stride = SECTION_SIZE/4; stride>0; stride/=2) {
__syncthreads();
unsigned int i = (threadIdx.x+1) * 2 * stride - 1;
if(i + stride < SECTION_SIZE) {
XY[i + stride] += XY[i];
}
}
__syncthreads();
// Cp threads to output array
t = threadIdx.x;
if(t + start < in_size) {
Y[start + t] = XY[t];
}
if(t + start < in_size && t + BLOCK_SIZE < in_size) {
Y[start + t + BLOCK_SIZE] = XY[t + BLOCK_SIZE];
}
}
/*
* Note: this kernel is based off the assumption that the GRID_DIM is 1024, or exactly
* twice the BLOCK_DIM. This way, one section/2 blocks/1 thread block in this prefix-scan stage will be able to
* exactly handle all the block outputs from the previous prefix-scan stage.
*/
__global__ void work_efficient_inclusive_scan_2(float *X, float *Y, unsigned in_size) {
__shared__ float XY[SECTION_SIZE];
unsigned int t = threadIdx.x;
// Each thread loads 2 elements, each element being the last element of every SECTION from last kernel
if(SECTION_SIZE * (t+1) - 1 < in_size) {
XY[t] = X[SECTION_SIZE * (t+1) - 1];
}
if(SECTION_SIZE * (t+BLOCK_SIZE+1) - 1 < in_size) {
XY[t+BLOCK_SIZE] = X[SECTION_SIZE * (t+BLOCK_SIZE+1) - 1];
}
// Reduction tree step (increase stride size)
for(unsigned int stride = 1; stride <= BLOCK_SIZE; stride *=2) {
__syncthreads();
unsigned int i = (threadIdx.x+1) * 2 * stride - 1;
if(i < SECTION_SIZE) {
XY[i] += XY[i-stride];
}
}
// Distribution tree step (decrease stride size)
for(unsigned int stride = SECTION_SIZE/4; stride>0; stride/=2) {
__syncthreads();
unsigned int i = (threadIdx.x+1) * 2 * stride - 1;
if(i + stride < SECTION_SIZE) {
XY[i + stride] += XY[i];
}
}
__syncthreads();
// Cp threads to output array
t = threadIdx.x;
if(t < in_size) {
Y[t] = XY[t];
}
if(t+BLOCK_SIZE < in_size) {
Y[t+BLOCK_SIZE] = XY[t+BLOCK_SIZE];
}
}
__global__ void work_efficient_inclusive_scan_3(float *X2, float *X, float *Y, unsigned in_size) {
unsigned int t = threadIdx.x;
// Cp threads to output array (each thread copies 2 elements and add result from prev kernel
unsigned int start = 2 * blockIdx.x * blockDim.x;
if(start != 0) { // Do for blocks 1 onwards
if(start + t < in_size) {
Y[start + t] = X2[start + t] + X[blockIdx.x - 1];
}
if(start + t + BLOCK_SIZE < in_size) {
Y[start + t + BLOCK_SIZE] = X2[start + t + BLOCK_SIZE] + X[blockIdx.x - 1];
}
} else {
if(start + t < in_size) {
Y[start + t] = X2[start + t];
}
if(start + t + BLOCK_SIZE) {
Y[start + t + BLOCK_SIZE] = X2[start + t + BLOCK_SIZE];
}
}
}
void preScan(float *out2, float *in, unsigned in_size) {
// INSERT CODE HERE
dim3 DimGrid((in_size-1)/(BLOCK_SIZE*2)+1, 1, 1);
dim3 DimBlock(BLOCK_SIZE, 1, 1);
work_efficient_inclusive_scan<<<DimGrid, DimBlock>>>(in, out2, in_size);
}
// Host launcher for phase 2: scans the per-section totals in `out2` into
// `out3`. A single block suffices since one thread block covers all the
// section totals handled by this step.
void preScan2(float *out3, float *out2, unsigned in_size) {
dim3 gridDims(1, 1, 1);   // all section totals fit in one block's scan
dim3 blockDims(BLOCK_SIZE, 1, 1);
work_efficient_inclusive_scan_2<<<gridDims, blockDims>>>(out2, out3, in_size);
}
// Host launcher for phase 3: combines the phase-1 per-section results
// (`out2`) with the scanned section totals (`out3`) into the final scan
// output `out`. Grid layout mirrors preScan: one block per 2*BLOCK_SIZE
// elements.
void preScan3(float *out, float *out3, float *out2, unsigned in_size) {
unsigned int numBlocks = (in_size - 1) / (2 * BLOCK_SIZE) + 1; // ceil-div
dim3 gridDims(numBlocks, 1, 1);
dim3 blockDims(BLOCK_SIZE, 1, 1);
work_efficient_inclusive_scan_3<<<gridDims, blockDims>>>(out2, out3, out, in_size);
}
.file "kernel.hip"
.globl _Z44__device_stub__work_efficient_inclusive_scanPfS_j # -- Begin function _Z44__device_stub__work_efficient_inclusive_scanPfS_j
.p2align 4, 0x90
.type _Z44__device_stub__work_efficient_inclusive_scanPfS_j,@function
_Z44__device_stub__work_efficient_inclusive_scanPfS_j: # @_Z44__device_stub__work_efficient_inclusive_scanPfS_j
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z29work_efficient_inclusive_scanPfS_j, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z44__device_stub__work_efficient_inclusive_scanPfS_j, .Lfunc_end0-_Z44__device_stub__work_efficient_inclusive_scanPfS_j
.cfi_endproc
# -- End function
.globl _Z46__device_stub__work_efficient_inclusive_scan_2PfS_j # -- Begin function _Z46__device_stub__work_efficient_inclusive_scan_2PfS_j
.p2align 4, 0x90
.type _Z46__device_stub__work_efficient_inclusive_scan_2PfS_j,@function
_Z46__device_stub__work_efficient_inclusive_scan_2PfS_j: # @_Z46__device_stub__work_efficient_inclusive_scan_2PfS_j
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z31work_efficient_inclusive_scan_2PfS_j, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end1:
.size _Z46__device_stub__work_efficient_inclusive_scan_2PfS_j, .Lfunc_end1-_Z46__device_stub__work_efficient_inclusive_scan_2PfS_j
.cfi_endproc
# -- End function
.globl _Z46__device_stub__work_efficient_inclusive_scan_3PfS_S_j # -- Begin function _Z46__device_stub__work_efficient_inclusive_scan_3PfS_S_j
.p2align 4, 0x90
.type _Z46__device_stub__work_efficient_inclusive_scan_3PfS_S_j,@function
_Z46__device_stub__work_efficient_inclusive_scan_3PfS_S_j: # @_Z46__device_stub__work_efficient_inclusive_scan_3PfS_S_j
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z31work_efficient_inclusive_scan_3PfS_S_j, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end2:
.size _Z46__device_stub__work_efficient_inclusive_scan_3PfS_S_j, .Lfunc_end2-_Z46__device_stub__work_efficient_inclusive_scan_3PfS_S_j
.cfi_endproc
# -- End function
.globl _Z7preScanPfS_j # -- Begin function _Z7preScanPfS_j
.p2align 4, 0x90
.type _Z7preScanPfS_j,@function
_Z7preScanPfS_j: # @_Z7preScanPfS_j
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $112, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl %edx, %ebx
movq %rsi, %r15
movq %rdi, %r14
leal -1(%rbx), %eax
shrl $10, %eax
movabsq $4294967296, %rdx # imm = 0x100000000
leaq (%rdx,%rax), %rdi
incq %rdi
orq $512, %rdx # imm = 0x200
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_2
# %bb.1:
movq %r15, 72(%rsp)
movq %r14, 64(%rsp)
movl %ebx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z29work_efficient_inclusive_scanPfS_j, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_2:
addq $112, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size _Z7preScanPfS_j, .Lfunc_end3-_Z7preScanPfS_j
.cfi_endproc
# -- End function
.globl _Z8preScan2PfS_j # -- Begin function _Z8preScan2PfS_j
.p2align 4, 0x90
.type _Z8preScan2PfS_j,@function
_Z8preScan2PfS_j: # @_Z8preScan2PfS_j
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $112, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl %edx, %ebx
movq %rsi, %r15
movq %rdi, %r14
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 511(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_2
# %bb.1:
movq %r15, 72(%rsp)
movq %r14, 64(%rsp)
movl %ebx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z31work_efficient_inclusive_scan_2PfS_j, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_2:
addq $112, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end4:
.size _Z8preScan2PfS_j, .Lfunc_end4-_Z8preScan2PfS_j
.cfi_endproc
# -- End function
.globl _Z8preScan3PfS_S_j # -- Begin function _Z8preScan3PfS_S_j
.p2align 4, 0x90
.type _Z8preScan3PfS_S_j,@function
_Z8preScan3PfS_S_j: # @_Z8preScan3PfS_S_j
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $120, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl %ecx, %ebx
movq %rdx, %r12
movq %rsi, %r15
movq %rdi, %r14
leal -1(%rbx), %eax
shrl $10, %eax
movabsq $4294967296, %rdx # imm = 0x100000000
leaq (%rdx,%rax), %rdi
incq %rdi
orq $512, %rdx # imm = 0x200
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_2
# %bb.1:
movq %r12, 72(%rsp)
movq %r15, 64(%rsp)
movq %r14, 56(%rsp)
movl %ebx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z31work_efficient_inclusive_scan_3PfS_S_j, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_2:
addq $120, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end5:
.size _Z8preScan3PfS_S_j, .Lfunc_end5-_Z8preScan3PfS_S_j
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB6_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB6_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z29work_efficient_inclusive_scanPfS_j, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z31work_efficient_inclusive_scan_2PfS_j, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z31work_efficient_inclusive_scan_3PfS_S_j, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end6:
.size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB7_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB7_2:
retq
.Lfunc_end7:
.size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z29work_efficient_inclusive_scanPfS_j,@object # @_Z29work_efficient_inclusive_scanPfS_j
.section .rodata,"a",@progbits
.globl _Z29work_efficient_inclusive_scanPfS_j
.p2align 3, 0x0
_Z29work_efficient_inclusive_scanPfS_j:
.quad _Z44__device_stub__work_efficient_inclusive_scanPfS_j
.size _Z29work_efficient_inclusive_scanPfS_j, 8
.type _Z31work_efficient_inclusive_scan_2PfS_j,@object # @_Z31work_efficient_inclusive_scan_2PfS_j
.globl _Z31work_efficient_inclusive_scan_2PfS_j
.p2align 3, 0x0
_Z31work_efficient_inclusive_scan_2PfS_j:
.quad _Z46__device_stub__work_efficient_inclusive_scan_2PfS_j
.size _Z31work_efficient_inclusive_scan_2PfS_j, 8
.type _Z31work_efficient_inclusive_scan_3PfS_S_j,@object # @_Z31work_efficient_inclusive_scan_3PfS_S_j
.globl _Z31work_efficient_inclusive_scan_3PfS_S_j
.p2align 3, 0x0
_Z31work_efficient_inclusive_scan_3PfS_S_j:
.quad _Z46__device_stub__work_efficient_inclusive_scan_3PfS_S_j
.size _Z31work_efficient_inclusive_scan_3PfS_S_j, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z29work_efficient_inclusive_scanPfS_j"
.size .L__unnamed_1, 39
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z31work_efficient_inclusive_scan_2PfS_j"
.size .L__unnamed_2, 41
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z31work_efficient_inclusive_scan_3PfS_S_j"
.size .L__unnamed_3, 43
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z44__device_stub__work_efficient_inclusive_scanPfS_j
.addrsig_sym _Z46__device_stub__work_efficient_inclusive_scan_2PfS_j
.addrsig_sym _Z46__device_stub__work_efficient_inclusive_scan_3PfS_S_j
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z29work_efficient_inclusive_scanPfS_j
.addrsig_sym _Z31work_efficient_inclusive_scan_2PfS_j
.addrsig_sym _Z31work_efficient_inclusive_scan_3PfS_S_j
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000f6e0e_00000000-6_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2032:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2032:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z52__device_stub__Z29work_efficient_inclusive_scanPfS_jPfS_j
.type _Z52__device_stub__Z29work_efficient_inclusive_scanPfS_jPfS_j, @function
_Z52__device_stub__Z29work_efficient_inclusive_scanPfS_jPfS_j:
.LFB2054:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z29work_efficient_inclusive_scanPfS_j(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2054:
.size _Z52__device_stub__Z29work_efficient_inclusive_scanPfS_jPfS_j, .-_Z52__device_stub__Z29work_efficient_inclusive_scanPfS_jPfS_j
.globl _Z29work_efficient_inclusive_scanPfS_j
.type _Z29work_efficient_inclusive_scanPfS_j, @function
_Z29work_efficient_inclusive_scanPfS_j:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z52__device_stub__Z29work_efficient_inclusive_scanPfS_jPfS_j
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size _Z29work_efficient_inclusive_scanPfS_j, .-_Z29work_efficient_inclusive_scanPfS_j
.globl _Z7preScanPfS_j
.type _Z7preScanPfS_j, @function
_Z7preScanPfS_j:
.LFB2027:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $32, %rsp
.cfi_def_cfa_offset 64
movq %rdi, %r12
movq %rsi, %rbp
movl %edx, %ebx
leal -1(%rdx), %eax
shrl $10, %eax
addl $1, %eax
movl %eax, 8(%rsp)
movl $1, 12(%rsp)
movl $512, 20(%rsp)
movl $1, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L14
.L11:
addq $32, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L14:
.cfi_restore_state
movl %ebx, %edx
movq %r12, %rsi
movq %rbp, %rdi
call _Z52__device_stub__Z29work_efficient_inclusive_scanPfS_jPfS_j
jmp .L11
.cfi_endproc
.LFE2027:
.size _Z7preScanPfS_j, .-_Z7preScanPfS_j
.globl _Z54__device_stub__Z31work_efficient_inclusive_scan_2PfS_jPfS_j
.type _Z54__device_stub__Z31work_efficient_inclusive_scan_2PfS_jPfS_j, @function
_Z54__device_stub__Z31work_efficient_inclusive_scan_2PfS_jPfS_j:
.LFB2056:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L19
.L15:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L20
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z31work_efficient_inclusive_scan_2PfS_j(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L15
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2056:
.size _Z54__device_stub__Z31work_efficient_inclusive_scan_2PfS_jPfS_j, .-_Z54__device_stub__Z31work_efficient_inclusive_scan_2PfS_jPfS_j
.globl _Z31work_efficient_inclusive_scan_2PfS_j
.type _Z31work_efficient_inclusive_scan_2PfS_j, @function
_Z31work_efficient_inclusive_scan_2PfS_j:
.LFB2057:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z54__device_stub__Z31work_efficient_inclusive_scan_2PfS_jPfS_j
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size _Z31work_efficient_inclusive_scan_2PfS_j, .-_Z31work_efficient_inclusive_scan_2PfS_j
.globl _Z8preScan2PfS_j
.type _Z8preScan2PfS_j, @function
_Z8preScan2PfS_j:
.LFB2028:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $32, %rsp
.cfi_def_cfa_offset 64
movq %rdi, %rbp
movq %rsi, %rbx
movl %edx, %r12d
movl $1, 8(%rsp)
movl $1, 12(%rsp)
movl $512, 20(%rsp)
movl $1, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L26
.L23:
addq $32, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L26:
.cfi_restore_state
movl %r12d, %edx
movq %rbp, %rsi
movq %rbx, %rdi
call _Z54__device_stub__Z31work_efficient_inclusive_scan_2PfS_jPfS_j
jmp .L23
.cfi_endproc
.LFE2028:
.size _Z8preScan2PfS_j, .-_Z8preScan2PfS_j
.globl _Z56__device_stub__Z31work_efficient_inclusive_scan_3PfS_S_jPfS_S_j
.type _Z56__device_stub__Z31work_efficient_inclusive_scan_3PfS_S_jPfS_S_j, @function
_Z56__device_stub__Z31work_efficient_inclusive_scan_3PfS_S_jPfS_S_j:
.LFB2058:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L31
.L27:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L32
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z31work_efficient_inclusive_scan_3PfS_S_j(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L27
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size _Z56__device_stub__Z31work_efficient_inclusive_scan_3PfS_S_jPfS_S_j, .-_Z56__device_stub__Z31work_efficient_inclusive_scan_3PfS_S_jPfS_S_j
.globl _Z31work_efficient_inclusive_scan_3PfS_S_j
.type _Z31work_efficient_inclusive_scan_3PfS_S_j, @function
_Z31work_efficient_inclusive_scan_3PfS_S_j:
.LFB2059:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z56__device_stub__Z31work_efficient_inclusive_scan_3PfS_S_jPfS_S_j
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _Z31work_efficient_inclusive_scan_3PfS_S_j, .-_Z31work_efficient_inclusive_scan_3PfS_S_j
.globl _Z8preScan3PfS_S_j
.type _Z8preScan3PfS_S_j, @function
_Z8preScan3PfS_S_j:
.LFB2029:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $40, %rsp
.cfi_def_cfa_offset 80
movq %rdi, %r13
movq %rsi, %r12
movq %rdx, %rbp
movl %ecx, %ebx
leal -1(%rcx), %eax
shrl $10, %eax
addl $1, %eax
movl %eax, 8(%rsp)
movl $1, 12(%rsp)
movl $512, 20(%rsp)
movl $1, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L38
.L35:
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L38:
.cfi_restore_state
movl %ebx, %ecx
movq %r13, %rdx
movq %r12, %rsi
movq %rbp, %rdi
call _Z56__device_stub__Z31work_efficient_inclusive_scan_3PfS_S_jPfS_S_j
jmp .L35
.cfi_endproc
.LFE2029:
.size _Z8preScan3PfS_S_j, .-_Z8preScan3PfS_S_j
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z31work_efficient_inclusive_scan_3PfS_S_j"
.align 8
.LC1:
.string "_Z31work_efficient_inclusive_scan_2PfS_j"
.align 8
.LC2:
.string "_Z29work_efficient_inclusive_scanPfS_j"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2061:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z31work_efficient_inclusive_scan_3PfS_S_j(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z31work_efficient_inclusive_scan_2PfS_j(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z29work_efficient_inclusive_scanPfS_j(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "kernel.hip"
.globl _Z44__device_stub__work_efficient_inclusive_scanPfS_j # -- Begin function _Z44__device_stub__work_efficient_inclusive_scanPfS_j
.p2align 4, 0x90
.type _Z44__device_stub__work_efficient_inclusive_scanPfS_j,@function
_Z44__device_stub__work_efficient_inclusive_scanPfS_j: # @_Z44__device_stub__work_efficient_inclusive_scanPfS_j
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z29work_efficient_inclusive_scanPfS_j, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z44__device_stub__work_efficient_inclusive_scanPfS_j, .Lfunc_end0-_Z44__device_stub__work_efficient_inclusive_scanPfS_j
.cfi_endproc
# -- End function
.globl _Z46__device_stub__work_efficient_inclusive_scan_2PfS_j # -- Begin function _Z46__device_stub__work_efficient_inclusive_scan_2PfS_j
.p2align 4, 0x90
.type _Z46__device_stub__work_efficient_inclusive_scan_2PfS_j,@function
_Z46__device_stub__work_efficient_inclusive_scan_2PfS_j: # @_Z46__device_stub__work_efficient_inclusive_scan_2PfS_j
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z31work_efficient_inclusive_scan_2PfS_j, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end1:
.size _Z46__device_stub__work_efficient_inclusive_scan_2PfS_j, .Lfunc_end1-_Z46__device_stub__work_efficient_inclusive_scan_2PfS_j
.cfi_endproc
# -- End function
.globl _Z46__device_stub__work_efficient_inclusive_scan_3PfS_S_j # -- Begin function _Z46__device_stub__work_efficient_inclusive_scan_3PfS_S_j
.p2align 4, 0x90
.type _Z46__device_stub__work_efficient_inclusive_scan_3PfS_S_j,@function
_Z46__device_stub__work_efficient_inclusive_scan_3PfS_S_j: # @_Z46__device_stub__work_efficient_inclusive_scan_3PfS_S_j
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z31work_efficient_inclusive_scan_3PfS_S_j, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end2:
.size _Z46__device_stub__work_efficient_inclusive_scan_3PfS_S_j, .Lfunc_end2-_Z46__device_stub__work_efficient_inclusive_scan_3PfS_S_j
.cfi_endproc
# -- End function
.globl _Z7preScanPfS_j # -- Begin function _Z7preScanPfS_j
.p2align 4, 0x90
.type _Z7preScanPfS_j,@function
_Z7preScanPfS_j: # @_Z7preScanPfS_j
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $112, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl %edx, %ebx
movq %rsi, %r15
movq %rdi, %r14
leal -1(%rbx), %eax
shrl $10, %eax
movabsq $4294967296, %rdx # imm = 0x100000000
leaq (%rdx,%rax), %rdi
incq %rdi
orq $512, %rdx # imm = 0x200
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_2
# %bb.1:
movq %r15, 72(%rsp)
movq %r14, 64(%rsp)
movl %ebx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z29work_efficient_inclusive_scanPfS_j, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_2:
addq $112, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size _Z7preScanPfS_j, .Lfunc_end3-_Z7preScanPfS_j
.cfi_endproc
# -- End function
.globl _Z8preScan2PfS_j # -- Begin function _Z8preScan2PfS_j
.p2align 4, 0x90
.type _Z8preScan2PfS_j,@function
_Z8preScan2PfS_j: # @_Z8preScan2PfS_j
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $112, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl %edx, %ebx
movq %rsi, %r15
movq %rdi, %r14
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 511(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_2
# %bb.1:
movq %r15, 72(%rsp)
movq %r14, 64(%rsp)
movl %ebx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z31work_efficient_inclusive_scan_2PfS_j, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_2:
addq $112, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end4:
.size _Z8preScan2PfS_j, .Lfunc_end4-_Z8preScan2PfS_j
.cfi_endproc
# -- End function
.globl _Z8preScan3PfS_S_j # -- Begin function _Z8preScan3PfS_S_j
.p2align 4, 0x90
.type _Z8preScan3PfS_S_j,@function
_Z8preScan3PfS_S_j: # @_Z8preScan3PfS_S_j
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $120, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl %ecx, %ebx
movq %rdx, %r12
movq %rsi, %r15
movq %rdi, %r14
leal -1(%rbx), %eax
shrl $10, %eax
movabsq $4294967296, %rdx # imm = 0x100000000
leaq (%rdx,%rax), %rdi
incq %rdi
orq $512, %rdx # imm = 0x200
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_2
# %bb.1:
movq %r12, 72(%rsp)
movq %r15, 64(%rsp)
movq %r14, 56(%rsp)
movl %ebx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z31work_efficient_inclusive_scan_3PfS_S_j, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_2:
addq $120, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end5:
.size _Z8preScan3PfS_S_j, .Lfunc_end5-_Z8preScan3PfS_S_j
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB6_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB6_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z29work_efficient_inclusive_scanPfS_j, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z31work_efficient_inclusive_scan_2PfS_j, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z31work_efficient_inclusive_scan_3PfS_S_j, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end6:
.size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB7_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB7_2:
retq
.Lfunc_end7:
.size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z29work_efficient_inclusive_scanPfS_j,@object # @_Z29work_efficient_inclusive_scanPfS_j
.section .rodata,"a",@progbits
.globl _Z29work_efficient_inclusive_scanPfS_j
.p2align 3, 0x0
_Z29work_efficient_inclusive_scanPfS_j:
.quad _Z44__device_stub__work_efficient_inclusive_scanPfS_j
.size _Z29work_efficient_inclusive_scanPfS_j, 8
.type _Z31work_efficient_inclusive_scan_2PfS_j,@object # @_Z31work_efficient_inclusive_scan_2PfS_j
.globl _Z31work_efficient_inclusive_scan_2PfS_j
.p2align 3, 0x0
_Z31work_efficient_inclusive_scan_2PfS_j:
.quad _Z46__device_stub__work_efficient_inclusive_scan_2PfS_j
.size _Z31work_efficient_inclusive_scan_2PfS_j, 8
.type _Z31work_efficient_inclusive_scan_3PfS_S_j,@object # @_Z31work_efficient_inclusive_scan_3PfS_S_j
.globl _Z31work_efficient_inclusive_scan_3PfS_S_j
.p2align 3, 0x0
_Z31work_efficient_inclusive_scan_3PfS_S_j:
.quad _Z46__device_stub__work_efficient_inclusive_scan_3PfS_S_j
.size _Z31work_efficient_inclusive_scan_3PfS_S_j, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z29work_efficient_inclusive_scanPfS_j"
.size .L__unnamed_1, 39
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z31work_efficient_inclusive_scan_2PfS_j"
.size .L__unnamed_2, 41
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z31work_efficient_inclusive_scan_3PfS_S_j"
.size .L__unnamed_3, 43
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z44__device_stub__work_efficient_inclusive_scanPfS_j
.addrsig_sym _Z46__device_stub__work_efficient_inclusive_scan_2PfS_j
.addrsig_sym _Z46__device_stub__work_efficient_inclusive_scan_3PfS_S_j
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z29work_efficient_inclusive_scanPfS_j
.addrsig_sym _Z31work_efficient_inclusive_scan_2PfS_j
.addrsig_sym _Z31work_efficient_inclusive_scan_3PfS_S_j
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void kBlockify(float* source, float* target, int numdims, int blocksize) {
const unsigned int idx = threadIdx.x;
const unsigned int numThreads = blockDim.x;
const int off = blockIdx.x * numdims;
for (unsigned int target_ind = idx; target_ind < numdims; target_ind += numThreads) {
const int block = target_ind / blocksize;
target[off + target_ind] = source[off + block * blocksize];
}
} | code for sm_80
Function : _Z9kBlockifyPfS_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e240000002100 */
/*0020*/ ISETP.GE.U32.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x001fda0003f06070 */
/*0030*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0040*/ I2F.U32.RP R4, c[0x0][0x174] ; /* 0x00005d0000047b06 */
/* 0x000e220000209000 */
/*0050*/ S2R R11, SR_CTAID.X ; /* 0x00000000000b7919 */
/* 0x000e620000002500 */
/*0060*/ ISETP.NE.U32.AND P0, PT, RZ, c[0x0][0x174], PT ; /* 0x00005d00ff007a0c */
/* 0x000fe20003f05070 */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0080*/ LOP3.LUT R9, RZ, c[0x0][0x174], RZ, 0x33, !PT ; /* 0x00005d00ff097a12 */
/* 0x000fc800078e33ff */
/*0090*/ MUFU.RCP R4, R4 ; /* 0x0000000400047308 */
/* 0x001e240000001000 */
/*00a0*/ IADD3 R2, R4, 0xffffffe, RZ ; /* 0x0ffffffe04027810 */
/* 0x001fcc0007ffe0ff */
/*00b0*/ F2I.FTZ.U32.TRUNC.NTZ R3, R2 ; /* 0x0000000200037305 */
/* 0x0000a4000021f000 */
/*00c0*/ IMAD.MOV.U32 R2, RZ, RZ, RZ ; /* 0x000000ffff027224 */
/* 0x001fe400078e00ff */
/*00d0*/ IMAD.MOV R7, RZ, RZ, -R3 ; /* 0x000000ffff077224 */
/* 0x004fc800078e0a03 */
/*00e0*/ IMAD R7, R7, c[0x0][0x174], RZ ; /* 0x00005d0007077a24 */
/* 0x000fc800078e02ff */
/*00f0*/ IMAD.HI.U32 R7, R3, R7, R2 ; /* 0x0000000703077227 */
/* 0x002fcc00078e0002 */
/*0100*/ IMAD.HI.U32 R2, R7, R0, RZ ; /* 0x0000000007027227 */
/* 0x000fc800078e00ff */
/*0110*/ IMAD R4, R11, c[0x0][0x170], R0 ; /* 0x00005c000b047a24 */
/* 0x001fe200078e0200 */
/*0120*/ IADD3 R3, -R2, RZ, RZ ; /* 0x000000ff02037210 */
/* 0x000fe20007ffe1ff */
/*0130*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */
/* 0x000fc800078e00ff */
/*0140*/ IMAD R2, R3, c[0x0][0x174], R0 ; /* 0x00005d0003027a24 */
/* 0x000fca00078e0200 */
/*0150*/ ISETP.GE.U32.AND P1, PT, R2, c[0x0][0x174], PT ; /* 0x00005d0002007a0c */
/* 0x000fda0003f26070 */
/*0160*/ @P1 IADD3 R2, R2, -c[0x0][0x174], RZ ; /* 0x80005d0002021a10 */
/* 0x000fc80007ffe0ff */
/*0170*/ ISETP.GE.U32.AND P1, PT, R2, c[0x0][0x174], PT ; /* 0x00005d0002007a0c */
/* 0x000fda0003f26070 */
/*0180*/ @P1 IADD3 R2, R2, -c[0x0][0x174], RZ ; /* 0x80005d0002021a10 */
/* 0x000fc80007ffe0ff */
/*0190*/ SEL R3, R9, R2, !P0 ; /* 0x0000000209037207 */
/* 0x000fc80004000000 */
/*01a0*/ IADD3 R3, R4, -R3, RZ ; /* 0x8000000304037210 */
/* 0x000fca0007ffe0ff */
/*01b0*/ IMAD.WIDE R2, R3, R5, c[0x0][0x160] ; /* 0x0000580003027625 */
/* 0x000fcc00078e0205 */
/*01c0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*01d0*/ IMAD.WIDE.U32 R4, R4, R5, c[0x0][0x168] ; /* 0x00005a0004047625 */
/* 0x000fe200078e0005 */
/*01e0*/ IADD3 R0, R0, c[0x0][0x0], RZ ; /* 0x0000000000007a10 */
/* 0x000fc80007ffe0ff */
/*01f0*/ ISETP.GE.U32.AND P1, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fe20003f26070 */
/*0200*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */
/* 0x0041d8000c101904 */
/*0210*/ @!P1 BRA 0x100 ; /* 0xfffffee000009947 */
/* 0x000fea000383ffff */
/*0220*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0230*/ BRA 0x230; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void kBlockify(float* source, float* target, int numdims, int blocksize) {
const unsigned int idx = threadIdx.x;
const unsigned int numThreads = blockDim.x;
const int off = blockIdx.x * numdims;
for (unsigned int target_ind = idx; target_ind < numdims; target_ind += numThreads) {
const int block = target_ind / blocksize;
target[off + target_ind] = source[off + block * blocksize];
}
} | .file "tmpxft_000ca221_00000000-6_kBlockify.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z32__device_stub__Z9kBlockifyPfS_iiPfS_ii
.type _Z32__device_stub__Z9kBlockifyPfS_iiPfS_ii, @function
_Z32__device_stub__Z9kBlockifyPfS_iiPfS_ii:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9kBlockifyPfS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z32__device_stub__Z9kBlockifyPfS_iiPfS_ii, .-_Z32__device_stub__Z9kBlockifyPfS_iiPfS_ii
.globl _Z9kBlockifyPfS_ii
.type _Z9kBlockifyPfS_ii, @function
_Z9kBlockifyPfS_ii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z9kBlockifyPfS_iiPfS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z9kBlockifyPfS_ii, .-_Z9kBlockifyPfS_ii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z9kBlockifyPfS_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z9kBlockifyPfS_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void kBlockify(float* source, float* target, int numdims, int blocksize) {
const unsigned int idx = threadIdx.x;
const unsigned int numThreads = blockDim.x;
const int off = blockIdx.x * numdims;
for (unsigned int target_ind = idx; target_ind < numdims; target_ind += numThreads) {
const int block = target_ind / blocksize;
target[off + target_ind] = source[off + block * blocksize];
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void kBlockify(float* source, float* target, int numdims, int blocksize) {
const unsigned int idx = threadIdx.x;
const unsigned int numThreads = blockDim.x;
const int off = blockIdx.x * numdims;
for (unsigned int target_ind = idx; target_ind < numdims; target_ind += numThreads) {
const int block = target_ind / blocksize;
target[off + target_ind] = source[off + block * blocksize];
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void kBlockify(float* source, float* target, int numdims, int blocksize) {
const unsigned int idx = threadIdx.x;
const unsigned int numThreads = blockDim.x;
const int off = blockIdx.x * numdims;
for (unsigned int target_ind = idx; target_ind < numdims; target_ind += numThreads) {
const int block = target_ind / blocksize;
target[off + target_ind] = source[off + block * blocksize];
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9kBlockifyPfS_ii
.globl _Z9kBlockifyPfS_ii
.p2align 8
.type _Z9kBlockifyPfS_ii,@function
_Z9kBlockifyPfS_ii:
s_load_b32 s2, s[0:1], 0x10
s_mov_b32 s3, exec_lo
s_waitcnt lgkmcnt(0)
v_cmpx_gt_u32_e64 s2, v0
s_cbranch_execz .LBB0_3
s_clause 0x2
s_load_b32 s3, s[0:1], 0x14
s_load_b32 s9, s[0:1], 0x24
s_load_b128 s[4:7], s[0:1], 0x0
s_mul_i32 s15, s15, s2
s_waitcnt lgkmcnt(0)
v_cvt_f32_u32_e32 v1, s3
s_sub_i32 s8, 0, s3
s_and_b32 s1, s9, 0xffff
s_mov_b32 s9, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_iflag_f32_e32 v1, v1
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v1, 0x4f7ffffe, v1
v_cvt_u32_f32_e32 v1, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v2, s8, v1
v_mul_hi_u32 v2, v1, v2
s_delay_alu instid0(VALU_DEP_1)
v_dual_mov_b32 v2, 0 :: v_dual_add_nc_u32 v3, v1, v2
s_set_inst_prefetch_distance 0x1
.p2align 6
.LBB0_2:
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v1, v0, v3
v_not_b32_e32 v7, v1
v_mad_u64_u32 v[4:5], null, s8, v1, v[0:1]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
v_mad_u64_u32 v[5:6], null, s3, v7, v[0:1]
v_add_nc_u32_e32 v1, s15, v0
v_add_nc_u32_e32 v0, s1, v0
v_cmp_le_u32_e32 vcc_lo, s3, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_cmp_le_u32_e64 s0, s2, v0
v_cndmask_b32_e32 v4, v4, v5, vcc_lo
s_or_b32 s9, s0, s9
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_subrev_nc_u32_e32 v5, s3, v4
v_cmp_le_u32_e32 vcc_lo, s3, v4
v_cndmask_b32_e32 v4, v4, v5, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v4, v1, v4
v_ashrrev_i32_e32 v5, 31, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[4:5], 2, v[4:5]
v_add_co_u32 v4, vcc_lo, s4, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v5, vcc_lo
global_load_b32 v6, v[4:5], off
v_lshlrev_b64 v[4:5], 2, v[1:2]
v_add_co_u32 v4, vcc_lo, s6, v4
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v5, vcc_lo
s_waitcnt vmcnt(0)
global_store_b32 v[4:5], v6, off
s_and_not1_b32 exec_lo, exec_lo, s9
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_set_inst_prefetch_distance 0x2
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9kBlockifyPfS_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9kBlockifyPfS_ii, .Lfunc_end0-_Z9kBlockifyPfS_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9kBlockifyPfS_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9kBlockifyPfS_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void kBlockify(float* source, float* target, int numdims, int blocksize) {
const unsigned int idx = threadIdx.x;
const unsigned int numThreads = blockDim.x;
const int off = blockIdx.x * numdims;
for (unsigned int target_ind = idx; target_ind < numdims; target_ind += numThreads) {
const int block = target_ind / blocksize;
target[off + target_ind] = source[off + block * blocksize];
}
} | .text
.file "kBlockify.hip"
.globl _Z24__device_stub__kBlockifyPfS_ii # -- Begin function _Z24__device_stub__kBlockifyPfS_ii
.p2align 4, 0x90
.type _Z24__device_stub__kBlockifyPfS_ii,@function
_Z24__device_stub__kBlockifyPfS_ii: # @_Z24__device_stub__kBlockifyPfS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9kBlockifyPfS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z24__device_stub__kBlockifyPfS_ii, .Lfunc_end0-_Z24__device_stub__kBlockifyPfS_ii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9kBlockifyPfS_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9kBlockifyPfS_ii,@object # @_Z9kBlockifyPfS_ii
.section .rodata,"a",@progbits
.globl _Z9kBlockifyPfS_ii
.p2align 3, 0x0
_Z9kBlockifyPfS_ii:
.quad _Z24__device_stub__kBlockifyPfS_ii
.size _Z9kBlockifyPfS_ii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9kBlockifyPfS_ii"
.size .L__unnamed_1, 19
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__kBlockifyPfS_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9kBlockifyPfS_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z9kBlockifyPfS_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e240000002100 */
/*0020*/ ISETP.GE.U32.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x001fda0003f06070 */
/*0030*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0040*/ I2F.U32.RP R4, c[0x0][0x174] ; /* 0x00005d0000047b06 */
/* 0x000e220000209000 */
/*0050*/ S2R R11, SR_CTAID.X ; /* 0x00000000000b7919 */
/* 0x000e620000002500 */
/*0060*/ ISETP.NE.U32.AND P0, PT, RZ, c[0x0][0x174], PT ; /* 0x00005d00ff007a0c */
/* 0x000fe20003f05070 */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0080*/ LOP3.LUT R9, RZ, c[0x0][0x174], RZ, 0x33, !PT ; /* 0x00005d00ff097a12 */
/* 0x000fc800078e33ff */
/*0090*/ MUFU.RCP R4, R4 ; /* 0x0000000400047308 */
/* 0x001e240000001000 */
/*00a0*/ IADD3 R2, R4, 0xffffffe, RZ ; /* 0x0ffffffe04027810 */
/* 0x001fcc0007ffe0ff */
/*00b0*/ F2I.FTZ.U32.TRUNC.NTZ R3, R2 ; /* 0x0000000200037305 */
/* 0x0000a4000021f000 */
/*00c0*/ IMAD.MOV.U32 R2, RZ, RZ, RZ ; /* 0x000000ffff027224 */
/* 0x001fe400078e00ff */
/*00d0*/ IMAD.MOV R7, RZ, RZ, -R3 ; /* 0x000000ffff077224 */
/* 0x004fc800078e0a03 */
/*00e0*/ IMAD R7, R7, c[0x0][0x174], RZ ; /* 0x00005d0007077a24 */
/* 0x000fc800078e02ff */
/*00f0*/ IMAD.HI.U32 R7, R3, R7, R2 ; /* 0x0000000703077227 */
/* 0x002fcc00078e0002 */
/*0100*/ IMAD.HI.U32 R2, R7, R0, RZ ; /* 0x0000000007027227 */
/* 0x000fc800078e00ff */
/*0110*/ IMAD R4, R11, c[0x0][0x170], R0 ; /* 0x00005c000b047a24 */
/* 0x001fe200078e0200 */
/*0120*/ IADD3 R3, -R2, RZ, RZ ; /* 0x000000ff02037210 */
/* 0x000fe20007ffe1ff */
/*0130*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */
/* 0x000fc800078e00ff */
/*0140*/ IMAD R2, R3, c[0x0][0x174], R0 ; /* 0x00005d0003027a24 */
/* 0x000fca00078e0200 */
/*0150*/ ISETP.GE.U32.AND P1, PT, R2, c[0x0][0x174], PT ; /* 0x00005d0002007a0c */
/* 0x000fda0003f26070 */
/*0160*/ @P1 IADD3 R2, R2, -c[0x0][0x174], RZ ; /* 0x80005d0002021a10 */
/* 0x000fc80007ffe0ff */
/*0170*/ ISETP.GE.U32.AND P1, PT, R2, c[0x0][0x174], PT ; /* 0x00005d0002007a0c */
/* 0x000fda0003f26070 */
/*0180*/ @P1 IADD3 R2, R2, -c[0x0][0x174], RZ ; /* 0x80005d0002021a10 */
/* 0x000fc80007ffe0ff */
/*0190*/ SEL R3, R9, R2, !P0 ; /* 0x0000000209037207 */
/* 0x000fc80004000000 */
/*01a0*/ IADD3 R3, R4, -R3, RZ ; /* 0x8000000304037210 */
/* 0x000fca0007ffe0ff */
/*01b0*/ IMAD.WIDE R2, R3, R5, c[0x0][0x160] ; /* 0x0000580003027625 */
/* 0x000fcc00078e0205 */
/*01c0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*01d0*/ IMAD.WIDE.U32 R4, R4, R5, c[0x0][0x168] ; /* 0x00005a0004047625 */
/* 0x000fe200078e0005 */
/*01e0*/ IADD3 R0, R0, c[0x0][0x0], RZ ; /* 0x0000000000007a10 */
/* 0x000fc80007ffe0ff */
/*01f0*/ ISETP.GE.U32.AND P1, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fe20003f26070 */
/*0200*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */
/* 0x0041d8000c101904 */
/*0210*/ @!P1 BRA 0x100 ; /* 0xfffffee000009947 */
/* 0x000fea000383ffff */
/*0220*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0230*/ BRA 0x230; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9kBlockifyPfS_ii
.globl _Z9kBlockifyPfS_ii
.p2align 8
.type _Z9kBlockifyPfS_ii,@function
_Z9kBlockifyPfS_ii:
s_load_b32 s2, s[0:1], 0x10
s_mov_b32 s3, exec_lo
s_waitcnt lgkmcnt(0)
v_cmpx_gt_u32_e64 s2, v0
s_cbranch_execz .LBB0_3
s_clause 0x2
s_load_b32 s3, s[0:1], 0x14
s_load_b32 s9, s[0:1], 0x24
s_load_b128 s[4:7], s[0:1], 0x0
s_mul_i32 s15, s15, s2
s_waitcnt lgkmcnt(0)
v_cvt_f32_u32_e32 v1, s3
s_sub_i32 s8, 0, s3
s_and_b32 s1, s9, 0xffff
s_mov_b32 s9, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_iflag_f32_e32 v1, v1
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v1, 0x4f7ffffe, v1
v_cvt_u32_f32_e32 v1, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v2, s8, v1
v_mul_hi_u32 v2, v1, v2
s_delay_alu instid0(VALU_DEP_1)
v_dual_mov_b32 v2, 0 :: v_dual_add_nc_u32 v3, v1, v2
s_set_inst_prefetch_distance 0x1
.p2align 6
.LBB0_2:
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v1, v0, v3
v_not_b32_e32 v7, v1
v_mad_u64_u32 v[4:5], null, s8, v1, v[0:1]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
v_mad_u64_u32 v[5:6], null, s3, v7, v[0:1]
v_add_nc_u32_e32 v1, s15, v0
v_add_nc_u32_e32 v0, s1, v0
v_cmp_le_u32_e32 vcc_lo, s3, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_cmp_le_u32_e64 s0, s2, v0
v_cndmask_b32_e32 v4, v4, v5, vcc_lo
s_or_b32 s9, s0, s9
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_subrev_nc_u32_e32 v5, s3, v4
v_cmp_le_u32_e32 vcc_lo, s3, v4
v_cndmask_b32_e32 v4, v4, v5, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v4, v1, v4
v_ashrrev_i32_e32 v5, 31, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[4:5], 2, v[4:5]
v_add_co_u32 v4, vcc_lo, s4, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v5, vcc_lo
global_load_b32 v6, v[4:5], off
v_lshlrev_b64 v[4:5], 2, v[1:2]
v_add_co_u32 v4, vcc_lo, s6, v4
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v5, vcc_lo
s_waitcnt vmcnt(0)
global_store_b32 v[4:5], v6, off
s_and_not1_b32 exec_lo, exec_lo, s9
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_set_inst_prefetch_distance 0x2
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9kBlockifyPfS_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9kBlockifyPfS_ii, .Lfunc_end0-_Z9kBlockifyPfS_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9kBlockifyPfS_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9kBlockifyPfS_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000ca221_00000000-6_kBlockify.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z32__device_stub__Z9kBlockifyPfS_iiPfS_ii
.type _Z32__device_stub__Z9kBlockifyPfS_iiPfS_ii, @function
_Z32__device_stub__Z9kBlockifyPfS_iiPfS_ii:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9kBlockifyPfS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z32__device_stub__Z9kBlockifyPfS_iiPfS_ii, .-_Z32__device_stub__Z9kBlockifyPfS_iiPfS_ii
.globl _Z9kBlockifyPfS_ii
.type _Z9kBlockifyPfS_ii, @function
_Z9kBlockifyPfS_ii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z9kBlockifyPfS_iiPfS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z9kBlockifyPfS_ii, .-_Z9kBlockifyPfS_ii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z9kBlockifyPfS_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z9kBlockifyPfS_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "kBlockify.hip"
.globl _Z24__device_stub__kBlockifyPfS_ii # -- Begin function _Z24__device_stub__kBlockifyPfS_ii
.p2align 4, 0x90
.type _Z24__device_stub__kBlockifyPfS_ii,@function
_Z24__device_stub__kBlockifyPfS_ii: # @_Z24__device_stub__kBlockifyPfS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9kBlockifyPfS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z24__device_stub__kBlockifyPfS_ii, .Lfunc_end0-_Z24__device_stub__kBlockifyPfS_ii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9kBlockifyPfS_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9kBlockifyPfS_ii,@object # @_Z9kBlockifyPfS_ii
.section .rodata,"a",@progbits
.globl _Z9kBlockifyPfS_ii
.p2align 3, 0x0
_Z9kBlockifyPfS_ii:
.quad _Z24__device_stub__kBlockifyPfS_ii
.size _Z9kBlockifyPfS_ii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9kBlockifyPfS_ii"
.size .L__unnamed_1, 19
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__kBlockifyPfS_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9kBlockifyPfS_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | ////#include<math.h>
////#include<cuda.h>
////#include<helper_math.h>
//#include<device_launch_parameters.h>
//#include<cutil_math.h>
//#include<cutil_inline.h>
//#include<cutil_gl_inline.h>
//#include<cuda_gl_interop.h>
//////////////////////////////////for __syncthreads()
//#ifndef __CUDACC__
// #define __CUDACC__
//#endif
//
//#include<device_functions.h>
//
//
//float gain, xStart, yStart, zOffset, octaves, lacunarity;
//#define Z_PLANE 50.0f
//
//__constant__ unsigned char c_perm[256];
//__shared__ unsigned char s_perm[256]; ///shared memory copy of permutation array
//unsigned char * d_perm = NULL; ///global memory copy of permutation array
////host version of permutation array
//const static unsigned char h_perm[] = { 151,160,137,91,90,15,
// 131,13,201,95,96,53,194,233,7,225,140,36,103,30,69,142,8,99,37,240,21,10,23,
// 190, 6,148,247,120,234,75,0,26,197,62,94,252,219,203,117,35,11,32,57,177,33,
// 88,237,149,56,87,174,20,125,136,171,168, 68,175,74,165,71,134,139,48,27,166,
// 77,146,158,231,83,111,229,122,60,211,133,230,220,105,92,41,55,46,245,40,244,
// 102,143,54, 65,25,63,161, 1,216,80,73,209,76,132,187,208, 89,18,169,200,196,
// 135,130,116,188,159,86,164,100,109,198,173,186, 3,64,52,217,226,250,124,123,
// 5,202,38,147,118,126,255,82,85,212,207,206,59,227,47,16,58,17,182,189,28,42,
// 223,183,170,213,119,248,152,2,44,154,163, 70,221,153,101,155,167, 43,172,9,
// 129,22,39,253, 19,98,108,110,79,113,224,232,178,185, 112,104,218,246,97,228,
// 251,34,242,193,238,210,144,12,191,179,162,241, 81,51,145,235,249,14,239,107,
// 49,192,214, 31,181,199,106,157,184,84,204,176,115,121,50,45,127, 4,150,254,
// 138,236,205,93,222,114,67,29,24,72,243,141,128,195,78,66,215,61,156,180
//};
//
//__device__ inline int perm(int i)
//{
// return(s_perm[i&0xff]);
//}
//
//__device__ inline float fade(float t)
//{
// return t * t*t*(t*(t*6.0f - 15.0f) + 10.0f);
//}
//
//__device__ inline float lerpP(float t, float a, float b)
//{
// return a + t * (b - a);
//}
//
//__device__ inline float grad(int hash, float x, float y, float z)
//{
// int h = hash & 15; //convert LO 4 bits of Hash code
// float u = h < 8 ? x : y, //into 12 gradient directions
// v = h < 4 ? y : h == 12 || h == 14 ? x : z;
// return ((h & 1) == 0 ? u : -u) + ((h & 2) == 0 ? v : -v);
//}
//
//
//
//__device__ float inoise(float x, float y, float z)
//{
// int X = ((int)floorf(x)) & 255, //Find unit cube
// Y = ((int)floorf(y)) & 255, //contains Point
// Z = ((int)floorf(z)) & 255;
//
//
// x -= floorf(x); //Find relative X,Y,Z
// y -= floorf(y); //of that point in cube
// z -= floorf(z);
//
// float u = fade(x), //compute fade curves
// v = fade(y),
// w = fade(z);
//
// int A = perm(X) + Y, AA = perm(A) + Z, AB = perm(A + 1) + Z, //HASH coordinates of
// B = perm(X + 1) + Y, BA = perm(B) + Z, BB = perm(B + 1) + Z; //the 8 cube corners
//
// return lerpP(w, lerpP(v, lerpP(u, grad(perm(AA), x, y, z),
// grad(perm(BA), x - 1.0f, y, z)),
// lerpP(u, grad(perm(AB), x, y - 1.0, z),
// grad(perm(BB), x - 1.0, y - 1.0, z))),
// lerpP(v, lerpP(u, grad(perm(AA + 1), x, y, z - 1.0f),
// grad(perm(BA + 1), x - 1.0f, y, z - 1.0f)),
// lerpP(u, grad(perm(AB + 1), x, y - 1.0f, z - 1.0f),
// grad(perm(BB + 1), x - 1.0, y - 1.0, z - 1.0))));
//
// return(perm(X));
//
//}
//
//
//
//__device__ float fBm(float x, float y, int octaves, float lacunarity = 2.0f, float gain = 0.5f)
//{
// float freq = 1.0f, amp = 0.5f;
// float sum = 0.0f;
// for (int i = 0; i < octaves; i++)
// {
// sum += inoise(x*freq, y*freq, Z_PLANE)*amp;
// freq *= lacunarity;
// amp *= gain;
// }
// return sum;
//}
//
//
//
//__device__ inline uchar4 colorElevation(float texHeight)
//{
// uchar4 pos;
//
// //color texel (r,g,b,a)
// if (texHeight < -1.000f) pos = make_uchar4(000, 000, 124, 255); //deeps
// else if (texHeight < -0.2500f) pos = make_uchar4(000, 000, 255, 255); //shallow
// else if (texHeight < 0.0000f) pos = make_uchar4(000, 128, 255, 255); //shore
// else if (texHeight < 0.0125f) pos = make_uchar4(240, 240, 064, 255); //sand
// else if (texHeight < 0.0125f) pos = make_uchar4(032, 160, 000, 255); //grass
// else if (texHeight < 0.3750f) pos = make_uchar4(224, 224, 000, 255); //dirt
// else if (texHeight < 0.7500f) pos = make_uchar4(128, 128, 128, 255);//rock
// else pos = make_uchar4(255, 255, 255, 255); //snow
//
// return(pos);
//
//
//}
//
//void checkCUDAError(const char *msg)
//{
// cudaError_t err = cudaGetLastError();
// if (cudaSuccess != err)
// {
// fprintf(stderr, "Cuda error : %s:%s", msg, cudaGetErrorString(err));
// exit(EXIT_FAILURE);
// }
//}
//
//
//
//
//
/////Simple Kernel fills an array with perlin noise
//__global__ void k_perlin(float4 *pos, uchar4 *colorPos,
// unsigned int width, unsigned int height,
// float2 start,float2 delta, float gain,
// float zOffset, unsigned char* d_perm,
// float ocataves, float lacunarity)
//{
// int idx = blockIdx.x * blockDim.x + threadIdx.x;
// float xCur = start.x + ((float)(idx%width)) * delta.x;
// float yCur = start.x + ((float)(idx / width)) * delta.y;
//
// if (threadIdx.x < 256)
// //optimization:this causes bank conflicts
// s_perm[threadIdx.x] = d_perm[threadIdx.x];
// //this synchronization can be imp.if there are more than 256 threads
// __syncthreads();
//
// //Each thread creates one pixel location in the texture (textel)
// if (idx < width*height)
// {
// float w = fBm(xCur, yCur, ocataves, lacunarity, gain) + zOffset;
//
// colorPos[idx] = colorElevation(w);
// float u = ((float)(idx%width)) / (float)width;
// float v = ((float)(idx / width)) / (float)height;
// u = u * 2.0f-1.0f;
// v = v * 2.0f - 1.0f;
// w = (w > 0.0f) ? w : 0.0f; //dont show region underwater
// pos[idx] = make_float4(u, w, v, 1.0f);
//
// }
//}
//
//
//
//uchar4 *eColor = NULL;
////Wrapper for __global__ call that setups the kernel call
//extern "C" void launch_kernel(float4 *pos, uchar4 *posColor,
// unsigned int image_width, unsigned int image_height, float time)
//{
// int nThreads = 256; //must be equal or larger than 256!
// int totalThreads = image_height * image_width;
// int nBlocks = totalThreads / nThreads;
// nBlocks += ((totalThreads%nThreads) > 0) ? 1 : 0;
//
// float xExtent = 10.0f;
// float yExtent = 10.0f;
// float xDelta = xExtent / (float)image_width;
// float yDelta = yExtent / (float)image_height;
//
//
// if (!d_perm)
// {
// //for convenience allocate and copy d_perm here
// cudaMalloc((void**)&d_perm, sizeof(h_perm));
// cudaMemcpy(d_perm, h_perm, sizeof(h_perm), cudaMemcpyHostToDevice);
// checkCUDAError("d_perm malloc or copy failed!!");
// }
//
// k_perlin << <nBlocks, nThreads >> > (pos, posColor, image_width, image_height,
// make_float2(xStart, yStart),
// make_float2(xDelta, yDelta),
// gain, zOffset, d_perm,
// octaves, lacunarity);
//
// //make certain the kernel has completed
// cudaThreadSynchronize();
// checkCUDAError("kernel failed!!");
//
//}
//
//
//
//
// | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | ////#include<math.h>
////#include<cuda.h>
////#include<helper_math.h>
//#include<device_launch_parameters.h>
//#include<cutil_math.h>
//#include<cutil_inline.h>
//#include<cutil_gl_inline.h>
//#include<cuda_gl_interop.h>
//////////////////////////////////for __syncthreads()
//#ifndef __CUDACC__
// #define __CUDACC__
//#endif
//
//#include<device_functions.h>
//
//
//float gain, xStart, yStart, zOffset, octaves, lacunarity;
//#define Z_PLANE 50.0f
//
//__constant__ unsigned char c_perm[256];
//__shared__ unsigned char s_perm[256]; ///shared memory copy of permutation array
//unsigned char * d_perm = NULL; ///global memory copy of permutation array
////host version of permutation array
//const static unsigned char h_perm[] = { 151,160,137,91,90,15,
// 131,13,201,95,96,53,194,233,7,225,140,36,103,30,69,142,8,99,37,240,21,10,23,
// 190, 6,148,247,120,234,75,0,26,197,62,94,252,219,203,117,35,11,32,57,177,33,
// 88,237,149,56,87,174,20,125,136,171,168, 68,175,74,165,71,134,139,48,27,166,
// 77,146,158,231,83,111,229,122,60,211,133,230,220,105,92,41,55,46,245,40,244,
// 102,143,54, 65,25,63,161, 1,216,80,73,209,76,132,187,208, 89,18,169,200,196,
// 135,130,116,188,159,86,164,100,109,198,173,186, 3,64,52,217,226,250,124,123,
// 5,202,38,147,118,126,255,82,85,212,207,206,59,227,47,16,58,17,182,189,28,42,
// 223,183,170,213,119,248,152,2,44,154,163, 70,221,153,101,155,167, 43,172,9,
// 129,22,39,253, 19,98,108,110,79,113,224,232,178,185, 112,104,218,246,97,228,
// 251,34,242,193,238,210,144,12,191,179,162,241, 81,51,145,235,249,14,239,107,
// 49,192,214, 31,181,199,106,157,184,84,204,176,115,121,50,45,127, 4,150,254,
// 138,236,205,93,222,114,67,29,24,72,243,141,128,195,78,66,215,61,156,180
//};
//
//__device__ inline int perm(int i)
//{
// return(s_perm[i&0xff]);
//}
//
//__device__ inline float fade(float t)
//{
// return t * t*t*(t*(t*6.0f - 15.0f) + 10.0f);
//}
//
//__device__ inline float lerpP(float t, float a, float b)
//{
// return a + t * (b - a);
//}
//
//__device__ inline float grad(int hash, float x, float y, float z)
//{
// int h = hash & 15; //convert LO 4 bits of Hash code
// float u = h < 8 ? x : y, //into 12 gradient directions
// v = h < 4 ? y : h == 12 || h == 14 ? x : z;
// return ((h & 1) == 0 ? u : -u) + ((h & 2) == 0 ? v : -v);
//}
//
//
//
//__device__ float inoise(float x, float y, float z)
//{
// int X = ((int)floorf(x)) & 255, //Find unit cube
// Y = ((int)floorf(y)) & 255, //contains Point
// Z = ((int)floorf(z)) & 255;
//
//
// x -= floorf(x); //Find relative X,Y,Z
// y -= floorf(y); //of that point in cube
// z -= floorf(z);
//
// float u = fade(x), //compute fade curves
// v = fade(y),
// w = fade(z);
//
// int A = perm(X) + Y, AA = perm(A) + Z, AB = perm(A + 1) + Z, //HASH coordinates of
// B = perm(X + 1) + Y, BA = perm(B) + Z, BB = perm(B + 1) + Z; //the 8 cube corners
//
// return lerpP(w, lerpP(v, lerpP(u, grad(perm(AA), x, y, z),
// grad(perm(BA), x - 1.0f, y, z)),
// lerpP(u, grad(perm(AB), x, y - 1.0, z),
// grad(perm(BB), x - 1.0, y - 1.0, z))),
// lerpP(v, lerpP(u, grad(perm(AA + 1), x, y, z - 1.0f),
// grad(perm(BA + 1), x - 1.0f, y, z - 1.0f)),
// lerpP(u, grad(perm(AB + 1), x, y - 1.0f, z - 1.0f),
// grad(perm(BB + 1), x - 1.0, y - 1.0, z - 1.0))));
//
// return(perm(X));
//
//}
//
//
//
//__device__ float fBm(float x, float y, int octaves, float lacunarity = 2.0f, float gain = 0.5f)
//{
// float freq = 1.0f, amp = 0.5f;
// float sum = 0.0f;
// for (int i = 0; i < octaves; i++)
// {
// sum += inoise(x*freq, y*freq, Z_PLANE)*amp;
// freq *= lacunarity;
// amp *= gain;
// }
// return sum;
//}
//
//
//
//__device__ inline uchar4 colorElevation(float texHeight)
//{
// uchar4 pos;
//
// //color texel (r,g,b,a)
// if (texHeight < -1.000f) pos = make_uchar4(000, 000, 124, 255); //deeps
// else if (texHeight < -0.2500f) pos = make_uchar4(000, 000, 255, 255); //shallow
// else if (texHeight < 0.0000f) pos = make_uchar4(000, 128, 255, 255); //shore
// else if (texHeight < 0.0125f) pos = make_uchar4(240, 240, 064, 255); //sand
// else if (texHeight < 0.0125f) pos = make_uchar4(032, 160, 000, 255); //grass
// else if (texHeight < 0.3750f) pos = make_uchar4(224, 224, 000, 255); //dirt
// else if (texHeight < 0.7500f) pos = make_uchar4(128, 128, 128, 255);//rock
// else pos = make_uchar4(255, 255, 255, 255); //snow
//
// return(pos);
//
//
//}
//
//void checkCUDAError(const char *msg)
//{
// cudaError_t err = cudaGetLastError();
// if (cudaSuccess != err)
// {
// fprintf(stderr, "Cuda error : %s:%s", msg, cudaGetErrorString(err));
// exit(EXIT_FAILURE);
// }
//}
//
//
//
//
//
/////Simple Kernel fills an array with perlin noise
//__global__ void k_perlin(float4 *pos, uchar4 *colorPos,
// unsigned int width, unsigned int height,
// float2 start,float2 delta, float gain,
// float zOffset, unsigned char* d_perm,
// float ocataves, float lacunarity)
//{
// int idx = blockIdx.x * blockDim.x + threadIdx.x;
// float xCur = start.x + ((float)(idx%width)) * delta.x;
// float yCur = start.x + ((float)(idx / width)) * delta.y;
//
// if (threadIdx.x < 256)
// //optimization:this causes bank conflicts
// s_perm[threadIdx.x] = d_perm[threadIdx.x];
// //this synchronization can be imp.if there are more than 256 threads
// __syncthreads();
//
// //Each thread creates one pixel location in the texture (textel)
// if (idx < width*height)
// {
// float w = fBm(xCur, yCur, ocataves, lacunarity, gain) + zOffset;
//
// colorPos[idx] = colorElevation(w);
// float u = ((float)(idx%width)) / (float)width;
// float v = ((float)(idx / width)) / (float)height;
// u = u * 2.0f-1.0f;
// v = v * 2.0f - 1.0f;
// w = (w > 0.0f) ? w : 0.0f; //dont show region underwater
// pos[idx] = make_float4(u, w, v, 1.0f);
//
// }
//}
//
//
//
//uchar4 *eColor = NULL;
////Wrapper for __global__ call that setups the kernel call
//extern "C" void launch_kernel(float4 *pos, uchar4 *posColor,
// unsigned int image_width, unsigned int image_height, float time)
//{
// int nThreads = 256; //must be equal or larger than 256!
// int totalThreads = image_height * image_width;
// int nBlocks = totalThreads / nThreads;
// nBlocks += ((totalThreads%nThreads) > 0) ? 1 : 0;
//
// float xExtent = 10.0f;
// float yExtent = 10.0f;
// float xDelta = xExtent / (float)image_width;
// float yDelta = yExtent / (float)image_height;
//
//
// if (!d_perm)
// {
// //for convenience allocate and copy d_perm here
// cudaMalloc((void**)&d_perm, sizeof(h_perm));
// cudaMemcpy(d_perm, h_perm, sizeof(h_perm), cudaMemcpyHostToDevice);
// checkCUDAError("d_perm malloc or copy failed!!");
// }
//
// k_perlin << <nBlocks, nThreads >> > (pos, posColor, image_width, image_height,
// make_float2(xStart, yStart),
// make_float2(xDelta, yDelta),
// gain, zOffset, d_perm,
// octaves, lacunarity);
//
// //make certain the kernel has completed
// cudaThreadSynchronize();
// checkCUDAError("kernel failed!!");
//
//}
//
//
//
//
// | .file "tmpxft_0003636d_00000000-6_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | ////#include<math.h>
////#include<cuda.h>
////#include<helper_math.h>
//#include<device_launch_parameters.h>
//#include<cutil_math.h>
//#include<cutil_inline.h>
//#include<cutil_gl_inline.h>
//#include<cuda_gl_interop.h>
//////////////////////////////////for __syncthreads()
//#ifndef __CUDACC__
// #define __CUDACC__
//#endif
//
//#include<device_functions.h>
//
//
//float gain, xStart, yStart, zOffset, octaves, lacunarity;
//#define Z_PLANE 50.0f
//
//__constant__ unsigned char c_perm[256];
//__shared__ unsigned char s_perm[256]; ///shared memory copy of permutation array
//unsigned char * d_perm = NULL; ///global memory copy of permutation array
////host version of permutation array
//const static unsigned char h_perm[] = { 151,160,137,91,90,15,
// 131,13,201,95,96,53,194,233,7,225,140,36,103,30,69,142,8,99,37,240,21,10,23,
// 190, 6,148,247,120,234,75,0,26,197,62,94,252,219,203,117,35,11,32,57,177,33,
// 88,237,149,56,87,174,20,125,136,171,168, 68,175,74,165,71,134,139,48,27,166,
// 77,146,158,231,83,111,229,122,60,211,133,230,220,105,92,41,55,46,245,40,244,
// 102,143,54, 65,25,63,161, 1,216,80,73,209,76,132,187,208, 89,18,169,200,196,
// 135,130,116,188,159,86,164,100,109,198,173,186, 3,64,52,217,226,250,124,123,
// 5,202,38,147,118,126,255,82,85,212,207,206,59,227,47,16,58,17,182,189,28,42,
// 223,183,170,213,119,248,152,2,44,154,163, 70,221,153,101,155,167, 43,172,9,
// 129,22,39,253, 19,98,108,110,79,113,224,232,178,185, 112,104,218,246,97,228,
// 251,34,242,193,238,210,144,12,191,179,162,241, 81,51,145,235,249,14,239,107,
// 49,192,214, 31,181,199,106,157,184,84,204,176,115,121,50,45,127, 4,150,254,
// 138,236,205,93,222,114,67,29,24,72,243,141,128,195,78,66,215,61,156,180
//};
//
//__device__ inline int perm(int i)
//{
// return(s_perm[i&0xff]);
//}
//
//__device__ inline float fade(float t)
//{
// return t * t*t*(t*(t*6.0f - 15.0f) + 10.0f);
//}
//
//__device__ inline float lerpP(float t, float a, float b)
//{
// return a + t * (b - a);
//}
//
//__device__ inline float grad(int hash, float x, float y, float z)
//{
// int h = hash & 15; //convert LO 4 bits of Hash code
// float u = h < 8 ? x : y, //into 12 gradient directions
// v = h < 4 ? y : h == 12 || h == 14 ? x : z;
// return ((h & 1) == 0 ? u : -u) + ((h & 2) == 0 ? v : -v);
//}
//
//
//
//__device__ float inoise(float x, float y, float z)
//{
// int X = ((int)floorf(x)) & 255, //Find unit cube
// Y = ((int)floorf(y)) & 255, //contains Point
// Z = ((int)floorf(z)) & 255;
//
//
// x -= floorf(x); //Find relative X,Y,Z
// y -= floorf(y); //of that point in cube
// z -= floorf(z);
//
// float u = fade(x), //compute fade curves
// v = fade(y),
// w = fade(z);
//
// int A = perm(X) + Y, AA = perm(A) + Z, AB = perm(A + 1) + Z, //HASH coordinates of
// B = perm(X + 1) + Y, BA = perm(B) + Z, BB = perm(B + 1) + Z; //the 8 cube corners
//
// return lerpP(w, lerpP(v, lerpP(u, grad(perm(AA), x, y, z),
// grad(perm(BA), x - 1.0f, y, z)),
// lerpP(u, grad(perm(AB), x, y - 1.0, z),
// grad(perm(BB), x - 1.0, y - 1.0, z))),
// lerpP(v, lerpP(u, grad(perm(AA + 1), x, y, z - 1.0f),
// grad(perm(BA + 1), x - 1.0f, y, z - 1.0f)),
// lerpP(u, grad(perm(AB + 1), x, y - 1.0f, z - 1.0f),
// grad(perm(BB + 1), x - 1.0, y - 1.0, z - 1.0))));
//
// return(perm(X));
//
//}
//
//
//
//__device__ float fBm(float x, float y, int octaves, float lacunarity = 2.0f, float gain = 0.5f)
//{
// float freq = 1.0f, amp = 0.5f;
// float sum = 0.0f;
// for (int i = 0; i < octaves; i++)
// {
// sum += inoise(x*freq, y*freq, Z_PLANE)*amp;
// freq *= lacunarity;
// amp *= gain;
// }
// return sum;
//}
//
//
//
//__device__ inline uchar4 colorElevation(float texHeight)
//{
// uchar4 pos;
//
// //color texel (r,g,b,a)
// if (texHeight < -1.000f) pos = make_uchar4(000, 000, 124, 255); //deeps
// else if (texHeight < -0.2500f) pos = make_uchar4(000, 000, 255, 255); //shallow
// else if (texHeight < 0.0000f) pos = make_uchar4(000, 128, 255, 255); //shore
// else if (texHeight < 0.0125f) pos = make_uchar4(240, 240, 064, 255); //sand
// else if (texHeight < 0.0125f) pos = make_uchar4(032, 160, 000, 255); //grass
// else if (texHeight < 0.3750f) pos = make_uchar4(224, 224, 000, 255); //dirt
// else if (texHeight < 0.7500f) pos = make_uchar4(128, 128, 128, 255);//rock
// else pos = make_uchar4(255, 255, 255, 255); //snow
//
// return(pos);
//
//
//}
//
//void checkCUDAError(const char *msg)
//{
// cudaError_t err = cudaGetLastError();
// if (cudaSuccess != err)
// {
// fprintf(stderr, "Cuda error : %s:%s", msg, cudaGetErrorString(err));
// exit(EXIT_FAILURE);
// }
//}
//
//
//
//
//
/////Simple Kernel fills an array with perlin noise
//__global__ void k_perlin(float4 *pos, uchar4 *colorPos,
// unsigned int width, unsigned int height,
// float2 start,float2 delta, float gain,
// float zOffset, unsigned char* d_perm,
// float ocataves, float lacunarity)
//{
// int idx = blockIdx.x * blockDim.x + threadIdx.x;
// float xCur = start.x + ((float)(idx%width)) * delta.x;
// float yCur = start.x + ((float)(idx / width)) * delta.y;
//
// if (threadIdx.x < 256)
// //optimization:this causes bank conflicts
// s_perm[threadIdx.x] = d_perm[threadIdx.x];
// //this synchronization can be imp.if there are more than 256 threads
// __syncthreads();
//
// //Each thread creates one pixel location in the texture (textel)
// if (idx < width*height)
// {
// float w = fBm(xCur, yCur, ocataves, lacunarity, gain) + zOffset;
//
// colorPos[idx] = colorElevation(w);
// float u = ((float)(idx%width)) / (float)width;
// float v = ((float)(idx / width)) / (float)height;
// u = u * 2.0f-1.0f;
// v = v * 2.0f - 1.0f;
// w = (w > 0.0f) ? w : 0.0f; //dont show region underwater
// pos[idx] = make_float4(u, w, v, 1.0f);
//
// }
//}
//
//
//
//uchar4 *eColor = NULL;
////Wrapper for __global__ call that setups the kernel call
//extern "C" void launch_kernel(float4 *pos, uchar4 *posColor,
// unsigned int image_width, unsigned int image_height, float time)
//{
// int nThreads = 256; //must be equal or larger than 256!
// int totalThreads = image_height * image_width;
// int nBlocks = totalThreads / nThreads;
// nBlocks += ((totalThreads%nThreads) > 0) ? 1 : 0;
//
// float xExtent = 10.0f;
// float yExtent = 10.0f;
// float xDelta = xExtent / (float)image_width;
// float yDelta = yExtent / (float)image_height;
//
//
// if (!d_perm)
// {
// //for convenience allocate and copy d_perm here
// cudaMalloc((void**)&d_perm, sizeof(h_perm));
// cudaMemcpy(d_perm, h_perm, sizeof(h_perm), cudaMemcpyHostToDevice);
// checkCUDAError("d_perm malloc or copy failed!!");
// }
//
// k_perlin << <nBlocks, nThreads >> > (pos, posColor, image_width, image_height,
// make_float2(xStart, yStart),
// make_float2(xDelta, yDelta),
// gain, zOffset, d_perm,
// octaves, lacunarity);
//
// //make certain the kernel has completed
// cudaThreadSynchronize();
// checkCUDAError("kernel failed!!");
//
//}
//
//
//
//
// | #include <hip/hip_runtime.h>
////#include<math.h>
////#include<cuda.h>
////#include<helper_math.h>
//#include<device_launch_parameters.h>
//#include<cutil_math.h>
//#include<cutil_inline.h>
//#include<cutil_gl_inline.h>
//#include<cuda_gl_interop.h>
//////////////////////////////////for __syncthreads()
//#ifndef __CUDACC__
// #define __CUDACC__
//#endif
//
//#include<device_functions.h>
//
//
//float gain, xStart, yStart, zOffset, octaves, lacunarity;
//#define Z_PLANE 50.0f
//
//__constant__ unsigned char c_perm[256];
//__shared__ unsigned char s_perm[256]; ///shared memory copy of permutation array
//unsigned char * d_perm = NULL; ///global memory copy of permutation array
////host version of permutation array
//const static unsigned char h_perm[] = { 151,160,137,91,90,15,
// 131,13,201,95,96,53,194,233,7,225,140,36,103,30,69,142,8,99,37,240,21,10,23,
// 190, 6,148,247,120,234,75,0,26,197,62,94,252,219,203,117,35,11,32,57,177,33,
// 88,237,149,56,87,174,20,125,136,171,168, 68,175,74,165,71,134,139,48,27,166,
// 77,146,158,231,83,111,229,122,60,211,133,230,220,105,92,41,55,46,245,40,244,
// 102,143,54, 65,25,63,161, 1,216,80,73,209,76,132,187,208, 89,18,169,200,196,
// 135,130,116,188,159,86,164,100,109,198,173,186, 3,64,52,217,226,250,124,123,
// 5,202,38,147,118,126,255,82,85,212,207,206,59,227,47,16,58,17,182,189,28,42,
// 223,183,170,213,119,248,152,2,44,154,163, 70,221,153,101,155,167, 43,172,9,
// 129,22,39,253, 19,98,108,110,79,113,224,232,178,185, 112,104,218,246,97,228,
// 251,34,242,193,238,210,144,12,191,179,162,241, 81,51,145,235,249,14,239,107,
// 49,192,214, 31,181,199,106,157,184,84,204,176,115,121,50,45,127, 4,150,254,
// 138,236,205,93,222,114,67,29,24,72,243,141,128,195,78,66,215,61,156,180
//};
//
//__device__ inline int perm(int i)
//{
// return(s_perm[i&0xff]);
//}
//
//__device__ inline float fade(float t)
//{
// return t * t*t*(t*(t*6.0f - 15.0f) + 10.0f);
//}
//
//__device__ inline float lerpP(float t, float a, float b)
//{
// return a + t * (b - a);
//}
//
//__device__ inline float grad(int hash, float x, float y, float z)
//{
// int h = hash & 15; //convert LO 4 bits of Hash code
// float u = h < 8 ? x : y, //into 12 gradient directions
// v = h < 4 ? y : h == 12 || h == 14 ? x : z;
// return ((h & 1) == 0 ? u : -u) + ((h & 2) == 0 ? v : -v);
//}
//
//
//
//__device__ float inoise(float x, float y, float z)
//{
// int X = ((int)floorf(x)) & 255, //Find unit cube
// Y = ((int)floorf(y)) & 255, //contains Point
// Z = ((int)floorf(z)) & 255;
//
//
// x -= floorf(x); //Find relative X,Y,Z
// y -= floorf(y); //of that point in cube
// z -= floorf(z);
//
// float u = fade(x), //compute fade curves
// v = fade(y),
// w = fade(z);
//
// int A = perm(X) + Y, AA = perm(A) + Z, AB = perm(A + 1) + Z, //HASH coordinates of
// B = perm(X + 1) + Y, BA = perm(B) + Z, BB = perm(B + 1) + Z; //the 8 cube corners
//
// return lerpP(w, lerpP(v, lerpP(u, grad(perm(AA), x, y, z),
// grad(perm(BA), x - 1.0f, y, z)),
// lerpP(u, grad(perm(AB), x, y - 1.0, z),
// grad(perm(BB), x - 1.0, y - 1.0, z))),
// lerpP(v, lerpP(u, grad(perm(AA + 1), x, y, z - 1.0f),
// grad(perm(BA + 1), x - 1.0f, y, z - 1.0f)),
// lerpP(u, grad(perm(AB + 1), x, y - 1.0f, z - 1.0f),
// grad(perm(BB + 1), x - 1.0, y - 1.0, z - 1.0))));
//
// return(perm(X));
//
//}
//
//
//
//__device__ float fBm(float x, float y, int octaves, float lacunarity = 2.0f, float gain = 0.5f)
//{
// float freq = 1.0f, amp = 0.5f;
// float sum = 0.0f;
// for (int i = 0; i < octaves; i++)
// {
// sum += inoise(x*freq, y*freq, Z_PLANE)*amp;
// freq *= lacunarity;
// amp *= gain;
// }
// return sum;
//}
//
//
//
//__device__ inline uchar4 colorElevation(float texHeight)
//{
// uchar4 pos;
//
// //color texel (r,g,b,a)
// if (texHeight < -1.000f) pos = make_uchar4(000, 000, 124, 255); //deeps
// else if (texHeight < -0.2500f) pos = make_uchar4(000, 000, 255, 255); //shallow
// else if (texHeight < 0.0000f) pos = make_uchar4(000, 128, 255, 255); //shore
// else if (texHeight < 0.0125f) pos = make_uchar4(240, 240, 064, 255); //sand
// else if (texHeight < 0.0125f) pos = make_uchar4(032, 160, 000, 255); //grass
// else if (texHeight < 0.3750f) pos = make_uchar4(224, 224, 000, 255); //dirt
// else if (texHeight < 0.7500f) pos = make_uchar4(128, 128, 128, 255);//rock
// else pos = make_uchar4(255, 255, 255, 255); //snow
//
// return(pos);
//
//
//}
//
//void checkCUDAError(const char *msg)
//{
// cudaError_t err = cudaGetLastError();
// if (cudaSuccess != err)
// {
// fprintf(stderr, "Cuda error : %s:%s", msg, cudaGetErrorString(err));
// exit(EXIT_FAILURE);
// }
//}
//
//
//
//
//
/////Simple Kernel fills an array with perlin noise
//__global__ void k_perlin(float4 *pos, uchar4 *colorPos,
// unsigned int width, unsigned int height,
// float2 start,float2 delta, float gain,
// float zOffset, unsigned char* d_perm,
// float ocataves, float lacunarity)
//{
// int idx = blockIdx.x * blockDim.x + threadIdx.x;
// float xCur = start.x + ((float)(idx%width)) * delta.x;
// float yCur = start.x + ((float)(idx / width)) * delta.y;
//
// if (threadIdx.x < 256)
// //optimization:this causes bank conflicts
// s_perm[threadIdx.x] = d_perm[threadIdx.x];
// //this synchronization can be imp.if there are more than 256 threads
// __syncthreads();
//
// //Each thread creates one pixel location in the texture (textel)
// if (idx < width*height)
// {
// float w = fBm(xCur, yCur, ocataves, lacunarity, gain) + zOffset;
//
// colorPos[idx] = colorElevation(w);
// float u = ((float)(idx%width)) / (float)width;
// float v = ((float)(idx / width)) / (float)height;
// u = u * 2.0f-1.0f;
// v = v * 2.0f - 1.0f;
// w = (w > 0.0f) ? w : 0.0f; //dont show region underwater
// pos[idx] = make_float4(u, w, v, 1.0f);
//
// }
//}
//
//
//
//uchar4 *eColor = NULL;
////Wrapper for __global__ call that setups the kernel call
//extern "C" void launch_kernel(float4 *pos, uchar4 *posColor,
// unsigned int image_width, unsigned int image_height, float time)
//{
// int nThreads = 256; //must be equal or larger than 256!
// int totalThreads = image_height * image_width;
// int nBlocks = totalThreads / nThreads;
// nBlocks += ((totalThreads%nThreads) > 0) ? 1 : 0;
//
// float xExtent = 10.0f;
// float yExtent = 10.0f;
// float xDelta = xExtent / (float)image_width;
// float yDelta = yExtent / (float)image_height;
//
//
// if (!d_perm)
// {
// //for convenience allocate and copy d_perm here
// cudaMalloc((void**)&d_perm, sizeof(h_perm));
// cudaMemcpy(d_perm, h_perm, sizeof(h_perm), cudaMemcpyHostToDevice);
// checkCUDAError("d_perm malloc or copy failed!!");
// }
//
// k_perlin << <nBlocks, nThreads >> > (pos, posColor, image_width, image_height,
// make_float2(xStart, yStart),
// make_float2(xDelta, yDelta),
// gain, zOffset, d_perm,
// octaves, lacunarity);
//
// //make certain the kernel has completed
// cudaThreadSynchronize();
// checkCUDAError("kernel failed!!");
//
//}
//
//
//
//
// |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
////#include<math.h>
////#include<cuda.h>
////#include<helper_math.h>
//#include<device_launch_parameters.h>
//#include<cutil_math.h>
//#include<cutil_inline.h>
//#include<cutil_gl_inline.h>
//#include<cuda_gl_interop.h>
//////////////////////////////////for __syncthreads()
//#ifndef __CUDACC__
// #define __CUDACC__
//#endif
//
//#include<device_functions.h>
//
//
//float gain, xStart, yStart, zOffset, octaves, lacunarity;
//#define Z_PLANE 50.0f
//
//__constant__ unsigned char c_perm[256];
//__shared__ unsigned char s_perm[256]; ///shared memory copy of permutation array
//unsigned char * d_perm = NULL; ///global memory copy of permutation array
////host version of permutation array
//const static unsigned char h_perm[] = { 151,160,137,91,90,15,
// 131,13,201,95,96,53,194,233,7,225,140,36,103,30,69,142,8,99,37,240,21,10,23,
// 190, 6,148,247,120,234,75,0,26,197,62,94,252,219,203,117,35,11,32,57,177,33,
// 88,237,149,56,87,174,20,125,136,171,168, 68,175,74,165,71,134,139,48,27,166,
// 77,146,158,231,83,111,229,122,60,211,133,230,220,105,92,41,55,46,245,40,244,
// 102,143,54, 65,25,63,161, 1,216,80,73,209,76,132,187,208, 89,18,169,200,196,
// 135,130,116,188,159,86,164,100,109,198,173,186, 3,64,52,217,226,250,124,123,
// 5,202,38,147,118,126,255,82,85,212,207,206,59,227,47,16,58,17,182,189,28,42,
// 223,183,170,213,119,248,152,2,44,154,163, 70,221,153,101,155,167, 43,172,9,
// 129,22,39,253, 19,98,108,110,79,113,224,232,178,185, 112,104,218,246,97,228,
// 251,34,242,193,238,210,144,12,191,179,162,241, 81,51,145,235,249,14,239,107,
// 49,192,214, 31,181,199,106,157,184,84,204,176,115,121,50,45,127, 4,150,254,
// 138,236,205,93,222,114,67,29,24,72,243,141,128,195,78,66,215,61,156,180
//};
//
//__device__ inline int perm(int i)
//{
// return(s_perm[i&0xff]);
//}
//
//__device__ inline float fade(float t)
//{
// return t * t*t*(t*(t*6.0f - 15.0f) + 10.0f);
//}
//
//__device__ inline float lerpP(float t, float a, float b)
//{
// return a + t * (b - a);
//}
//
//__device__ inline float grad(int hash, float x, float y, float z)
//{
// int h = hash & 15; //convert LO 4 bits of Hash code
// float u = h < 8 ? x : y, //into 12 gradient directions
// v = h < 4 ? y : h == 12 || h == 14 ? x : z;
// return ((h & 1) == 0 ? u : -u) + ((h & 2) == 0 ? v : -v);
//}
//
//
//
//__device__ float inoise(float x, float y, float z)
//{
// int X = ((int)floorf(x)) & 255, //Find unit cube
// Y = ((int)floorf(y)) & 255, //contains Point
// Z = ((int)floorf(z)) & 255;
//
//
// x -= floorf(x); //Find relative X,Y,Z
// y -= floorf(y); //of that point in cube
// z -= floorf(z);
//
// float u = fade(x), //compute fade curves
// v = fade(y),
// w = fade(z);
//
// int A = perm(X) + Y, AA = perm(A) + Z, AB = perm(A + 1) + Z, //HASH coordinates of
// B = perm(X + 1) + Y, BA = perm(B) + Z, BB = perm(B + 1) + Z; //the 8 cube corners
//
// return lerpP(w, lerpP(v, lerpP(u, grad(perm(AA), x, y, z),
// grad(perm(BA), x - 1.0f, y, z)),
// lerpP(u, grad(perm(AB), x, y - 1.0, z),
// grad(perm(BB), x - 1.0, y - 1.0, z))),
// lerpP(v, lerpP(u, grad(perm(AA + 1), x, y, z - 1.0f),
// grad(perm(BA + 1), x - 1.0f, y, z - 1.0f)),
// lerpP(u, grad(perm(AB + 1), x, y - 1.0f, z - 1.0f),
// grad(perm(BB + 1), x - 1.0, y - 1.0, z - 1.0))));
//
// return(perm(X));
//
//}
//
//
//
//__device__ float fBm(float x, float y, int octaves, float lacunarity = 2.0f, float gain = 0.5f)
//{
// float freq = 1.0f, amp = 0.5f;
// float sum = 0.0f;
// for (int i = 0; i < octaves; i++)
// {
// sum += inoise(x*freq, y*freq, Z_PLANE)*amp;
// freq *= lacunarity;
// amp *= gain;
// }
// return sum;
//}
//
//
//
//__device__ inline uchar4 colorElevation(float texHeight)
//{
// uchar4 pos;
//
// //color texel (r,g,b,a)
// if (texHeight < -1.000f) pos = make_uchar4(000, 000, 124, 255); //deeps
// else if (texHeight < -0.2500f) pos = make_uchar4(000, 000, 255, 255); //shallow
// else if (texHeight < 0.0000f) pos = make_uchar4(000, 128, 255, 255); //shore
// else if (texHeight < 0.0125f) pos = make_uchar4(240, 240, 064, 255); //sand
// else if (texHeight < 0.0125f) pos = make_uchar4(032, 160, 000, 255); //grass
// else if (texHeight < 0.3750f) pos = make_uchar4(224, 224, 000, 255); //dirt
// else if (texHeight < 0.7500f) pos = make_uchar4(128, 128, 128, 255);//rock
// else pos = make_uchar4(255, 255, 255, 255); //snow
//
// return(pos);
//
//
//}
//
//void checkCUDAError(const char *msg)
//{
// cudaError_t err = cudaGetLastError();
// if (cudaSuccess != err)
// {
// fprintf(stderr, "Cuda error : %s:%s", msg, cudaGetErrorString(err));
// exit(EXIT_FAILURE);
// }
//}
//
//
//
//
//
/////Simple Kernel fills an array with perlin noise
//__global__ void k_perlin(float4 *pos, uchar4 *colorPos,
// unsigned int width, unsigned int height,
// float2 start,float2 delta, float gain,
// float zOffset, unsigned char* d_perm,
// float ocataves, float lacunarity)
//{
// int idx = blockIdx.x * blockDim.x + threadIdx.x;
// float xCur = start.x + ((float)(idx%width)) * delta.x;
// float yCur = start.x + ((float)(idx / width)) * delta.y;
//
// if (threadIdx.x < 256)
// //optimization:this causes bank conflicts
// s_perm[threadIdx.x] = d_perm[threadIdx.x];
// //this synchronization can be imp.if there are more than 256 threads
// __syncthreads();
//
// //Each thread creates one pixel location in the texture (textel)
// if (idx < width*height)
// {
// float w = fBm(xCur, yCur, ocataves, lacunarity, gain) + zOffset;
//
// colorPos[idx] = colorElevation(w);
// float u = ((float)(idx%width)) / (float)width;
// float v = ((float)(idx / width)) / (float)height;
// u = u * 2.0f-1.0f;
// v = v * 2.0f - 1.0f;
// w = (w > 0.0f) ? w : 0.0f; //dont show region underwater
// pos[idx] = make_float4(u, w, v, 1.0f);
//
// }
//}
//
//
//
//uchar4 *eColor = NULL;
////Wrapper for __global__ call that setups the kernel call
//extern "C" void launch_kernel(float4 *pos, uchar4 *posColor,
// unsigned int image_width, unsigned int image_height, float time)
//{
// int nThreads = 256; //must be equal or larger than 256!
// int totalThreads = image_height * image_width;
// int nBlocks = totalThreads / nThreads;
// nBlocks += ((totalThreads%nThreads) > 0) ? 1 : 0;
//
// float xExtent = 10.0f;
// float yExtent = 10.0f;
// float xDelta = xExtent / (float)image_width;
// float yDelta = yExtent / (float)image_height;
//
//
// if (!d_perm)
// {
// //for convenience allocate and copy d_perm here
// cudaMalloc((void**)&d_perm, sizeof(h_perm));
// cudaMemcpy(d_perm, h_perm, sizeof(h_perm), cudaMemcpyHostToDevice);
// checkCUDAError("d_perm malloc or copy failed!!");
// }
//
// k_perlin << <nBlocks, nThreads >> > (pos, posColor, image_width, image_height,
// make_float2(xStart, yStart),
// make_float2(xDelta, yDelta),
// gain, zOffset, d_perm,
// octaves, lacunarity);
//
// //make certain the kernel has completed
// cudaThreadSynchronize();
// checkCUDAError("kernel failed!!");
//
//}
//
//
//
//
// | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
////#include<math.h>
////#include<cuda.h>
////#include<helper_math.h>
//#include<device_launch_parameters.h>
//#include<cutil_math.h>
//#include<cutil_inline.h>
//#include<cutil_gl_inline.h>
//#include<cuda_gl_interop.h>
//////////////////////////////////for __syncthreads()
//#ifndef __CUDACC__
// #define __CUDACC__
//#endif
//
//#include<device_functions.h>
//
//
//float gain, xStart, yStart, zOffset, octaves, lacunarity;
//#define Z_PLANE 50.0f
//
//__constant__ unsigned char c_perm[256];
//__shared__ unsigned char s_perm[256]; ///shared memory copy of permutation array
//unsigned char * d_perm = NULL; ///global memory copy of permutation array
////host version of permutation array
//const static unsigned char h_perm[] = { 151,160,137,91,90,15,
// 131,13,201,95,96,53,194,233,7,225,140,36,103,30,69,142,8,99,37,240,21,10,23,
// 190, 6,148,247,120,234,75,0,26,197,62,94,252,219,203,117,35,11,32,57,177,33,
// 88,237,149,56,87,174,20,125,136,171,168, 68,175,74,165,71,134,139,48,27,166,
// 77,146,158,231,83,111,229,122,60,211,133,230,220,105,92,41,55,46,245,40,244,
// 102,143,54, 65,25,63,161, 1,216,80,73,209,76,132,187,208, 89,18,169,200,196,
// 135,130,116,188,159,86,164,100,109,198,173,186, 3,64,52,217,226,250,124,123,
// 5,202,38,147,118,126,255,82,85,212,207,206,59,227,47,16,58,17,182,189,28,42,
// 223,183,170,213,119,248,152,2,44,154,163, 70,221,153,101,155,167, 43,172,9,
// 129,22,39,253, 19,98,108,110,79,113,224,232,178,185, 112,104,218,246,97,228,
// 251,34,242,193,238,210,144,12,191,179,162,241, 81,51,145,235,249,14,239,107,
// 49,192,214, 31,181,199,106,157,184,84,204,176,115,121,50,45,127, 4,150,254,
// 138,236,205,93,222,114,67,29,24,72,243,141,128,195,78,66,215,61,156,180
//};
//
//__device__ inline int perm(int i)
//{
// return(s_perm[i&0xff]);
//}
//
//__device__ inline float fade(float t)
//{
// return t * t*t*(t*(t*6.0f - 15.0f) + 10.0f);
//}
//
//__device__ inline float lerpP(float t, float a, float b)
//{
// return a + t * (b - a);
//}
//
//__device__ inline float grad(int hash, float x, float y, float z)
//{
// int h = hash & 15; //convert LO 4 bits of Hash code
// float u = h < 8 ? x : y, //into 12 gradient directions
// v = h < 4 ? y : h == 12 || h == 14 ? x : z;
// return ((h & 1) == 0 ? u : -u) + ((h & 2) == 0 ? v : -v);
//}
//
//
//
//__device__ float inoise(float x, float y, float z)
//{
// int X = ((int)floorf(x)) & 255, //Find unit cube
// Y = ((int)floorf(y)) & 255, //contains Point
// Z = ((int)floorf(z)) & 255;
//
//
// x -= floorf(x); //Find relative X,Y,Z
// y -= floorf(y); //of that point in cube
// z -= floorf(z);
//
// float u = fade(x), //compute fade curves
// v = fade(y),
// w = fade(z);
//
// int A = perm(X) + Y, AA = perm(A) + Z, AB = perm(A + 1) + Z, //HASH coordinates of
// B = perm(X + 1) + Y, BA = perm(B) + Z, BB = perm(B + 1) + Z; //the 8 cube corners
//
// return lerpP(w, lerpP(v, lerpP(u, grad(perm(AA), x, y, z),
// grad(perm(BA), x - 1.0f, y, z)),
// lerpP(u, grad(perm(AB), x, y - 1.0, z),
// grad(perm(BB), x - 1.0, y - 1.0, z))),
// lerpP(v, lerpP(u, grad(perm(AA + 1), x, y, z - 1.0f),
// grad(perm(BA + 1), x - 1.0f, y, z - 1.0f)),
// lerpP(u, grad(perm(AB + 1), x, y - 1.0f, z - 1.0f),
// grad(perm(BB + 1), x - 1.0, y - 1.0, z - 1.0))));
//
// return(perm(X));
//
//}
//
//
//
//__device__ float fBm(float x, float y, int octaves, float lacunarity = 2.0f, float gain = 0.5f)
//{
// float freq = 1.0f, amp = 0.5f;
// float sum = 0.0f;
// for (int i = 0; i < octaves; i++)
// {
// sum += inoise(x*freq, y*freq, Z_PLANE)*amp;
// freq *= lacunarity;
// amp *= gain;
// }
// return sum;
//}
//
//
//
//__device__ inline uchar4 colorElevation(float texHeight)
//{
// uchar4 pos;
//
// //color texel (r,g,b,a)
// if (texHeight < -1.000f) pos = make_uchar4(000, 000, 124, 255); //deeps
// else if (texHeight < -0.2500f) pos = make_uchar4(000, 000, 255, 255); //shallow
// else if (texHeight < 0.0000f) pos = make_uchar4(000, 128, 255, 255); //shore
// else if (texHeight < 0.0125f) pos = make_uchar4(240, 240, 064, 255); //sand
// else if (texHeight < 0.0125f) pos = make_uchar4(032, 160, 000, 255); //grass
// else if (texHeight < 0.3750f) pos = make_uchar4(224, 224, 000, 255); //dirt
// else if (texHeight < 0.7500f) pos = make_uchar4(128, 128, 128, 255);//rock
// else pos = make_uchar4(255, 255, 255, 255); //snow
//
// return(pos);
//
//
//}
//
//void checkCUDAError(const char *msg)
//{
// cudaError_t err = cudaGetLastError();
// if (cudaSuccess != err)
// {
// fprintf(stderr, "Cuda error : %s:%s", msg, cudaGetErrorString(err));
// exit(EXIT_FAILURE);
// }
//}
//
//
//
//
//
/////Simple Kernel fills an array with perlin noise
//__global__ void k_perlin(float4 *pos, uchar4 *colorPos,
// unsigned int width, unsigned int height,
// float2 start,float2 delta, float gain,
// float zOffset, unsigned char* d_perm,
// float ocataves, float lacunarity)
//{
// int idx = blockIdx.x * blockDim.x + threadIdx.x;
// float xCur = start.x + ((float)(idx%width)) * delta.x;
// float yCur = start.x + ((float)(idx / width)) * delta.y;
//
// if (threadIdx.x < 256)
// //optimization:this causes bank conflicts
// s_perm[threadIdx.x] = d_perm[threadIdx.x];
// //this synchronization can be imp.if there are more than 256 threads
// __syncthreads();
//
// //Each thread creates one pixel location in the texture (textel)
// if (idx < width*height)
// {
// float w = fBm(xCur, yCur, ocataves, lacunarity, gain) + zOffset;
//
// colorPos[idx] = colorElevation(w);
// float u = ((float)(idx%width)) / (float)width;
// float v = ((float)(idx / width)) / (float)height;
// u = u * 2.0f-1.0f;
// v = v * 2.0f - 1.0f;
// w = (w > 0.0f) ? w : 0.0f; //dont show region underwater
// pos[idx] = make_float4(u, w, v, 1.0f);
//
// }
//}
//
//
//
//uchar4 *eColor = NULL;
////Wrapper for __global__ call that setups the kernel call
//extern "C" void launch_kernel(float4 *pos, uchar4 *posColor,
// unsigned int image_width, unsigned int image_height, float time)
//{
// int nThreads = 256; //must be equal or larger than 256!
// int totalThreads = image_height * image_width;
// int nBlocks = totalThreads / nThreads;
// nBlocks += ((totalThreads%nThreads) > 0) ? 1 : 0;
//
// float xExtent = 10.0f;
// float yExtent = 10.0f;
// float xDelta = xExtent / (float)image_width;
// float yDelta = yExtent / (float)image_height;
//
//
// if (!d_perm)
// {
// //for convenience allocate and copy d_perm here
// cudaMalloc((void**)&d_perm, sizeof(h_perm));
// cudaMemcpy(d_perm, h_perm, sizeof(h_perm), cudaMemcpyHostToDevice);
// checkCUDAError("d_perm malloc or copy failed!!");
// }
//
// k_perlin << <nBlocks, nThreads >> > (pos, posColor, image_width, image_height,
// make_float2(xStart, yStart),
// make_float2(xDelta, yDelta),
// gain, zOffset, d_perm,
// octaves, lacunarity);
//
// //make certain the kernel has completed
// cudaThreadSynchronize();
// checkCUDAError("kernel failed!!");
//
//}
//
//
//
//
// | .text
.file "kernel.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0003636d_00000000-6_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "kernel.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
#define MAX_ARRAY_SIZE 1000000
/*
* Read data from ./inp.txt
* Store the data in (int * data)
* Return the number of elements read into the array
*/
int * read_data(int * size)
{
FILE * fptr = fopen("./inp.txt", "r");
if (!fptr) {
printf("!! Error in opening data file \n");
exit(1);
}
int cur_array_size = MAX_ARRAY_SIZE;
int * buffer = (int *)malloc(cur_array_size * sizeof(int));
int i = 0;
while (!feof(fptr)) {
if (fscanf(fptr, "%d,", &buffer[i]) != 1) {
break;
}
++i;
}
fclose(fptr);
*size = i;
return buffer;
}
/*
* Round up to the nearest power of 2
*/
int round_up_pow2(int val) {
if (val == 0) return 1;
int pow2 = 1;
while (pow2 < val) {
pow2 <<= 1;
}
return pow2;
}
/*
* Calculate the number of threads per block based on array size
* The function is so designed that a reduction on the array can
* be completed in two steps.
* The assumption is that the size of the array is no more than
* 1,000,000, such that the number of threads is no more than
* 1024, which is the computational limit of the GPU device.
*/
int calc_num_thread(int size) {
int approx = (int)sqrt((double)size);
// find the nearest power of 2
return round_up_pow2(approx);
}
/*
* GPU kernel for part a: reduction, getting the min value in a sub-array
*/
__global__ void shmem_reduce_kernel(int * d_out, const int * d_in, const int size)
{
// sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>>
extern __shared__ int sdata[];
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
// load shared mem from global mem
sdata[tid] = d_in[myId];
__syncthreads(); // make sure entire block is loaded!
// do reduction in shared mem
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s && (myId + s) < size)
{
if (sdata[tid] > sdata[tid + s])
sdata[tid] = sdata[tid + s];
}
__syncthreads(); // make sure all adds at one stage are done!
}
// only thread 0 writes result for this block back to global mem
if (tid == 0)
{
d_out[blockIdx.x] = sdata[0];
}
}
/*
* Reduction-based algorithm to find the min value in (int * d_in)
*/
void reduce(int * d_out, int * d_intermediate, int * d_in, int size)
{
// assumes that size is not greater than maxThreadsPerBlock^2
const int maxThreadsPerBlock = calc_num_thread(size);
int threads = maxThreadsPerBlock;
int blocks = (size + maxThreadsPerBlock - 1) / maxThreadsPerBlock;
shmem_reduce_kernel<<<blocks, threads, threads * sizeof(int)>>>(d_intermediate, d_in, size);
// now we're down to one block left, so reduce it
threads = blocks;
blocks = 1;
shmem_reduce_kernel<<<blocks, round_up_pow2(threads), threads * sizeof(int)>>>(d_out, d_intermediate, threads);
}
/*
* GPU kernel for part b: calculate the last digit of each element in the input array in parallel
*/
__global__ void last_digit_kernel(int * d_out, const int * d_in, const int size)
{
int myId = threadIdx.x + blockDim.x * blockIdx.x;
if (myId < size)
d_out[myId] = d_in[myId] % 10;
}
int main(void)
{
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
printf("!! Error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
int dev = 0;
cudaSetDevice(dev);
// data array on host
int array_size = 0;
int * h_in = read_data(&array_size);
int array_byte = array_size * sizeof(int);
// printf(">> Number of data read in: %d\n", array_size);
/*
* part a
*/
// declare GPU memory pointers
int * d_in, * d_intermediate, * d_out;
// allocate GPU memory
cudaMalloc((void **) &d_in, array_byte);
cudaMalloc((void **) &d_intermediate, array_byte);
cudaMalloc((void **) &d_out, sizeof(int));
// transfer the input array to the GPU
cudaMemcpy(d_in, h_in, array_byte, cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// launch the kernel
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_in, array_size);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
// copy back the min from GPU
int h_out;
cudaMemcpy(&h_out, d_out, sizeof(int), cudaMemcpyDeviceToHost);
// printf(">> Average time elapsed in part a: %f\n", elapsedTime);
// printf(">> Min value returned by device: %d\n", h_out);
// output the result into file
FILE * fptr_a = fopen("./q1a.txt", "w");
if (!fptr_a) {
printf("!! Error in opening output file \n");
exit(1);
}
fprintf(fptr_a, "%d", h_out);
fclose(fptr_a);
// free GPU memory allocation
// reuse d_in for the input array of part b
// reuse d_intermediate for the output array of part b
cudaFree(d_out);
/*
* part b
*/
d_out = d_intermediate;
int numThreadPerBlock = calc_num_thread(array_size);
int numBlock = (array_size + numThreadPerBlock - 1) / numThreadPerBlock;
// launch the kernel
cudaEventRecord(start, 0);
last_digit_kernel<<<numBlock, numThreadPerBlock>>>(d_out, d_in, array_size);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
// printf(">> Average time elapsed of part b: %f\n", elapsedTime);
// copy back the result array from GPU
int * h_out_array = (int *)malloc(array_byte);
cudaMemcpy(h_out_array, d_out, array_byte, cudaMemcpyDeviceToHost);
// output the result array into file
FILE * fptr_b = fopen("./q1b.txt", "w");
if (!fptr_b) {
printf("!! Error in opening output file \n");
exit(1);
}
for (int i = 0; i < array_size; ++i) {
fprintf(fptr_b, "%d", h_out_array[i]);
if (i < array_size - 1)
fprintf(fptr_b, ", ");
}
fclose(fptr_b);
// free CPU memory allocation
free(h_in);
free(h_out_array);
// free GPU memory allocation
cudaFree(d_in);
cudaFree(d_intermediate);
return 0;
} | code for sm_80
Function : _Z17last_digit_kernelPiPKii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_TID.X ; /* 0x0000000000047919 */
/* 0x000e280000002100 */
/*0020*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e240000002500 */
/*0030*/ IMAD R4, R3, c[0x0][0x0], R4 ; /* 0x0000000003047a24 */
/* 0x001fca00078e0204 */
/*0040*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x170], PT ; /* 0x00005c0004007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R9, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff097435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R2, R4, R9, c[0x0][0x168] ; /* 0x00005a0004027625 */
/* 0x000fcc00078e0209 */
/*0090*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea4000c1e1900 */
/*00a0*/ IMAD.HI R0, R2, 0x66666667, RZ ; /* 0x6666666702007827 */
/* 0x004fca00078e02ff */
/*00b0*/ SHF.R.U32.HI R5, RZ, 0x1f, R0 ; /* 0x0000001fff057819 */
/* 0x000fc80000011600 */
/*00c0*/ LEA.HI.SX32 R7, R0, R5, 0x1e ; /* 0x0000000500077211 */
/* 0x000fe200078ff2ff */
/*00d0*/ IMAD.WIDE R4, R4, R9, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fc800078e0209 */
/*00e0*/ IMAD R7, R7, -0xa, R2 ; /* 0xfffffff607077824 */
/* 0x000fca00078e0202 */
/*00f0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*0100*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0110*/ BRA 0x110; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z19shmem_reduce_kernelPiPKii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R0, R6, c[0x0][0x0], R7 ; /* 0x0000000006007a24 */
/* 0x001fc800078e0207 */
/*0060*/ IMAD.WIDE R2, R0, R3, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fcc00078e0203 */
/*0070*/ LDG.E R2, [R2.64] ; /* 0x0000000602027981 */
/* 0x000ea2000c1e1900 */
/*0080*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */
/* 0x000fe40000000800 */
/*0090*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */
/* 0x000fcc0008011604 */
/*00a0*/ ISETP.NE.AND P0, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */
/* 0x000fe2000bf05270 */
/*00b0*/ STS [R7.X4], R2 ; /* 0x0000000207007388 */
/* 0x0041e80000004800 */
/*00c0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000ff00000010000 */
/*00d0*/ @!P0 BRA 0x1f0 ; /* 0x0000011000008947 */
/* 0x000fea0003800000 */
/*00e0*/ SHF.L.U32 R2, R7, 0x2, RZ ; /* 0x0000000207027819 */
/* 0x001fe200000006ff */
/*00f0*/ IMAD.U32 R3, RZ, RZ, UR4 ; /* 0x00000004ff037e24 */
/* 0x000fca000f8e00ff */
/*0100*/ IADD3 R4, R0, R3, RZ ; /* 0x0000000300047210 */
/* 0x000fe20007ffe0ff */
/*0110*/ BSSY B0, 0x1b0 ; /* 0x0000009000007945 */
/* 0x000fe60003800000 */
/*0120*/ ISETP.GE.U32.AND P0, PT, R4, c[0x0][0x170], PT ; /* 0x00005c0004007a0c */
/* 0x000fc80003f06070 */
/*0130*/ ISETP.GE.U32.OR P0, PT, R7, R3, P0 ; /* 0x000000030700720c */
/* 0x000fda0000706470 */
/*0140*/ @P0 BRA 0x1a0 ; /* 0x0000005000000947 */
/* 0x001fea0003800000 */
/*0150*/ IMAD R5, R3, 0x4, R2 ; /* 0x0000000403057824 */
/* 0x000fe200078e0202 */
/*0160*/ LDS R4, [R7.X4] ; /* 0x0000000007047984 */
/* 0x000fea0000004800 */
/*0170*/ LDS R5, [R5] ; /* 0x0000000005057984 */
/* 0x000e240000000800 */
/*0180*/ ISETP.GT.AND P0, PT, R4, R5, PT ; /* 0x000000050400720c */
/* 0x001fda0003f04270 */
/*0190*/ @P0 STS [R7.X4], R5 ; /* 0x0000000507000388 */
/* 0x0001e40000004800 */
/*01a0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*01b0*/ SHF.R.U32.HI R3, RZ, 0x1, R3 ; /* 0x00000001ff037819 */
/* 0x000fe20000011603 */
/*01c0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe60000010000 */
/*01d0*/ ISETP.NE.AND P0, PT, R3, RZ, PT ; /* 0x000000ff0300720c */
/* 0x000fda0003f05270 */
/*01e0*/ @P0 BRA 0x100 ; /* 0xffffff1000000947 */
/* 0x000fea000383ffff */
/*01f0*/ ISETP.NE.AND P0, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x001fda0003f05270 */
/*0200*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0210*/ LDS R5, [RZ] ; /* 0x00000000ff057984 */
/* 0x000e220000000800 */
/*0220*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fd400000001ff */
/*0230*/ IMAD.WIDE.U32 R2, R6, R3, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x000fca00078e0003 */
/*0240*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101906 */
/*0250*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0260*/ BRA 0x260; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
#define MAX_ARRAY_SIZE 1000000
/*
* Read data from ./inp.txt
* Store the data in (int * data)
* Return the number of elements read into the array
*/
int * read_data(int * size)
{
FILE * fptr = fopen("./inp.txt", "r");
if (!fptr) {
printf("!! Error in opening data file \n");
exit(1);
}
int cur_array_size = MAX_ARRAY_SIZE;
int * buffer = (int *)malloc(cur_array_size * sizeof(int));
int i = 0;
while (!feof(fptr)) {
if (fscanf(fptr, "%d,", &buffer[i]) != 1) {
break;
}
++i;
}
fclose(fptr);
*size = i;
return buffer;
}
/*
* Round up to the nearest power of 2
*/
int round_up_pow2(int val) {
if (val == 0) return 1;
int pow2 = 1;
while (pow2 < val) {
pow2 <<= 1;
}
return pow2;
}
/*
* Calculate the number of threads per block based on array size
* The function is so designed that a reduction on the array can
* be completed in two steps.
* The assumption is that the size of the array is no more than
* 1,000,000, such that the number of threads is no more than
* 1024, which is the computational limit of the GPU device.
*/
int calc_num_thread(int size) {
int approx = (int)sqrt((double)size);
// find the nearest power of 2
return round_up_pow2(approx);
}
/*
* GPU kernel for part a: reduction, getting the min value in a sub-array
*/
__global__ void shmem_reduce_kernel(int * d_out, const int * d_in, const int size)
{
// sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>>
extern __shared__ int sdata[];
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
// load shared mem from global mem
sdata[tid] = d_in[myId];
__syncthreads(); // make sure entire block is loaded!
// do reduction in shared mem
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s && (myId + s) < size)
{
if (sdata[tid] > sdata[tid + s])
sdata[tid] = sdata[tid + s];
}
__syncthreads(); // make sure all adds at one stage are done!
}
// only thread 0 writes result for this block back to global mem
if (tid == 0)
{
d_out[blockIdx.x] = sdata[0];
}
}
/*
* Reduction-based algorithm to find the min value in (int * d_in)
*/
void reduce(int * d_out, int * d_intermediate, int * d_in, int size)
{
// assumes that size is not greater than maxThreadsPerBlock^2
const int maxThreadsPerBlock = calc_num_thread(size);
int threads = maxThreadsPerBlock;
int blocks = (size + maxThreadsPerBlock - 1) / maxThreadsPerBlock;
shmem_reduce_kernel<<<blocks, threads, threads * sizeof(int)>>>(d_intermediate, d_in, size);
// now we're down to one block left, so reduce it
threads = blocks;
blocks = 1;
shmem_reduce_kernel<<<blocks, round_up_pow2(threads), threads * sizeof(int)>>>(d_out, d_intermediate, threads);
}
/*
* GPU kernel for part b: calculate the last digit of each element in the input array in parallel
*/
__global__ void last_digit_kernel(int * d_out, const int * d_in, const int size)
{
int myId = threadIdx.x + blockDim.x * blockIdx.x;
if (myId < size)
d_out[myId] = d_in[myId] % 10;
}
int main(void)
{
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
printf("!! Error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
int dev = 0;
cudaSetDevice(dev);
// data array on host
int array_size = 0;
int * h_in = read_data(&array_size);
int array_byte = array_size * sizeof(int);
// printf(">> Number of data read in: %d\n", array_size);
/*
* part a
*/
// declare GPU memory pointers
int * d_in, * d_intermediate, * d_out;
// allocate GPU memory
cudaMalloc((void **) &d_in, array_byte);
cudaMalloc((void **) &d_intermediate, array_byte);
cudaMalloc((void **) &d_out, sizeof(int));
// transfer the input array to the GPU
cudaMemcpy(d_in, h_in, array_byte, cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// launch the kernel
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_in, array_size);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
// copy back the min from GPU
int h_out;
cudaMemcpy(&h_out, d_out, sizeof(int), cudaMemcpyDeviceToHost);
// printf(">> Average time elapsed in part a: %f\n", elapsedTime);
// printf(">> Min value returned by device: %d\n", h_out);
// output the result into file
FILE * fptr_a = fopen("./q1a.txt", "w");
if (!fptr_a) {
printf("!! Error in opening output file \n");
exit(1);
}
fprintf(fptr_a, "%d", h_out);
fclose(fptr_a);
// free GPU memory allocation
// reuse d_in for the input array of part b
// reuse d_intermediate for the output array of part b
cudaFree(d_out);
/*
* part b
*/
d_out = d_intermediate;
int numThreadPerBlock = calc_num_thread(array_size);
int numBlock = (array_size + numThreadPerBlock - 1) / numThreadPerBlock;
// launch the kernel
cudaEventRecord(start, 0);
last_digit_kernel<<<numBlock, numThreadPerBlock>>>(d_out, d_in, array_size);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
// printf(">> Average time elapsed of part b: %f\n", elapsedTime);
// copy back the result array from GPU
int * h_out_array = (int *)malloc(array_byte);
cudaMemcpy(h_out_array, d_out, array_byte, cudaMemcpyDeviceToHost);
// output the result array into file
FILE * fptr_b = fopen("./q1b.txt", "w");
if (!fptr_b) {
printf("!! Error in opening output file \n");
exit(1);
}
for (int i = 0; i < array_size; ++i) {
fprintf(fptr_b, "%d", h_out_array[i]);
if (i < array_size - 1)
fprintf(fptr_b, ", ");
}
fclose(fptr_b);
// free CPU memory allocation
free(h_in);
free(h_out_array);
// free GPU memory allocation
cudaFree(d_in);
cudaFree(d_intermediate);
return 0;
} | .file "tmpxft_0009e6ab_00000000-6_q1.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2064:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2064:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "r"
.LC1:
.string "./inp.txt"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "!! Error in opening data file \n"
.section .rodata.str1.1
.LC3:
.string "%d,"
.text
.globl _Z9read_dataPi
.type _Z9read_dataPi, @function
_Z9read_dataPi:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $8, %rsp
.cfi_def_cfa_offset 64
movq %rdi, %r15
leaq .LC0(%rip), %rsi
leaq .LC1(%rip), %rdi
call fopen@PLT
testq %rax, %rax
je .L9
movq %rax, %rbp
movl $4000000, %edi
call malloc@PLT
movq %rax, %r14
movq %rax, %rbx
movl $0, %r12d
leaq .LC3(%rip), %r13
.L5:
movq %rbp, %rdi
call feof@PLT
testl %eax, %eax
jne .L6
movq %rbx, %rdx
movq %r13, %rsi
movq %rbp, %rdi
movl $0, %eax
call __isoc23_fscanf@PLT
addq $4, %rbx
cmpl $1, %eax
jne .L6
addl $1, %r12d
jmp .L5
.L9:
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L6:
movq %rbp, %rdi
call fclose@PLT
movl %r12d, (%r15)
movq %r14, %rax
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size _Z9read_dataPi, .-_Z9read_dataPi
.globl _Z13round_up_pow2i
.type _Z13round_up_pow2i, @function
_Z13round_up_pow2i:
.LFB2058:
.cfi_startproc
endbr64
movl $1, %eax
cmpl $1, %edi
jle .L10
.L12:
addl %eax, %eax
cmpl %eax, %edi
jg .L12
.L10:
ret
.cfi_endproc
.LFE2058:
.size _Z13round_up_pow2i, .-_Z13round_up_pow2i
.globl _Z15calc_num_threadi
.type _Z15calc_num_threadi, @function
_Z15calc_num_threadi:
.LFB2059:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
pxor %xmm0, %xmm0
cvtsi2sdl %edi, %xmm0
pxor %xmm1, %xmm1
ucomisd %xmm0, %xmm1
ja .L20
sqrtsd %xmm0, %xmm0
.L18:
cvttsd2sil %xmm0, %edi
call _Z13round_up_pow2i
addq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L20:
.cfi_restore_state
call sqrt@PLT
jmp .L18
.cfi_endproc
.LFE2059:
.size _Z15calc_num_threadi, .-_Z15calc_num_threadi
.globl _Z43__device_stub__Z19shmem_reduce_kernelPiPKiiPiPKii
.type _Z43__device_stub__Z19shmem_reduce_kernelPiPKiiPiPKii, @function
_Z43__device_stub__Z19shmem_reduce_kernelPiPKiiPiPKii:
.LFB2086:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L26
.L22:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L27
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L26:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z19shmem_reduce_kernelPiPKii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L22
.L27:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2086:
.size _Z43__device_stub__Z19shmem_reduce_kernelPiPKiiPiPKii, .-_Z43__device_stub__Z19shmem_reduce_kernelPiPKiiPiPKii
.globl _Z19shmem_reduce_kernelPiPKii
.type _Z19shmem_reduce_kernelPiPKii, @function
_Z19shmem_reduce_kernelPiPKii:
.LFB2087:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z43__device_stub__Z19shmem_reduce_kernelPiPKiiPiPKii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2087:
.size _Z19shmem_reduce_kernelPiPKii, .-_Z19shmem_reduce_kernelPiPKii
.globl _Z6reducePiS_S_i
.type _Z6reducePiS_S_i, @function
_Z6reducePiS_S_i:
.LFB2060:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $32, %rsp
.cfi_def_cfa_offset 80
movq %rdi, %r13
movq %rsi, %r12
movq %rdx, %r14
movl %ecx, %ebp
movl %ecx, %edi
call _Z15calc_num_threadi
movl %eax, %ecx
leal -1(%rax,%rbp), %eax
cltd
idivl %ecx
movl %eax, %ebx
movl %ecx, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl %eax, 8(%rsp)
movl $1, 12(%rsp)
movl $1, 16(%rsp)
movslq %ecx, %rcx
movl $0, %r9d
leaq 0(,%rcx,4), %r8
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L34
.L31:
movl %ebx, %edi
call _Z13round_up_pow2i
movl %eax, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 8(%rsp)
movl $1, 12(%rsp)
movl $1, 16(%rsp)
movslq %ebx, %rax
movl $0, %r9d
leaq 0(,%rax,4), %r8
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L35
.L30:
addq $32, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L34:
.cfi_restore_state
movl %ebp, %edx
movq %r14, %rsi
movq %r12, %rdi
call _Z43__device_stub__Z19shmem_reduce_kernelPiPKiiPiPKii
jmp .L31
.L35:
movl %ebx, %edx
movq %r12, %rsi
movq %r13, %rdi
call _Z43__device_stub__Z19shmem_reduce_kernelPiPKiiPiPKii
jmp .L30
.cfi_endproc
.LFE2060:
.size _Z6reducePiS_S_i, .-_Z6reducePiS_S_i
.globl _Z41__device_stub__Z17last_digit_kernelPiPKiiPiPKii
.type _Z41__device_stub__Z17last_digit_kernelPiPKiiPiPKii, @function
_Z41__device_stub__Z17last_digit_kernelPiPKiiPiPKii:
.LFB2088:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L40
.L36:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L41
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L40:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z17last_digit_kernelPiPKii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L36
.L41:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2088:
.size _Z41__device_stub__Z17last_digit_kernelPiPKiiPiPKii, .-_Z41__device_stub__Z17last_digit_kernelPiPKiiPiPKii
.globl _Z17last_digit_kernelPiPKii
.type _Z17last_digit_kernelPiPKii, @function
_Z17last_digit_kernelPiPKii:
.LFB2089:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z41__device_stub__Z17last_digit_kernelPiPKiiPiPKii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2089:
.size _Z17last_digit_kernelPiPKii, .-_Z17last_digit_kernelPiPKii
.section .rodata.str1.8
.align 8
.LC5:
.string "!! Error: no devices supporting CUDA.\n"
.section .rodata.str1.1
.LC6:
.string "w"
.LC7:
.string "./q1a.txt"
.section .rodata.str1.8
.align 8
.LC8:
.string "!! Error in opening output file \n"
.section .rodata.str1.1
.LC9:
.string "%d"
.LC10:
.string "./q1b.txt"
.LC11:
.string ", "
.text
.globl main
.type main, @function
main:
.LFB2061:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $120, %rsp
.cfi_def_cfa_offset 176
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rdi
call cudaGetDeviceCount@PLT
cmpl $0, 24(%rsp)
je .L57
movl $0, %edi
call cudaSetDevice@PLT
movl $0, 28(%rsp)
leaq 28(%rsp), %rdi
call _Z9read_dataPi
movq %rax, %rbp
movq %rax, 8(%rsp)
movl 28(%rsp), %r15d
movslq %r15d, %r14
leal 0(,%r15,4), %ebx
movslq %ebx, %rbx
leaq 40(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 48(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 56(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %rbp, %rsi
movq 40(%rsp), %rdi
call cudaMemcpy@PLT
leaq 64(%rsp), %rdi
call cudaEventCreate@PLT
leaq 72(%rsp), %rdi
call cudaEventCreate@PLT
movl $0, %esi
movq 64(%rsp), %rdi
call cudaEventRecord@PLT
movl %r15d, %ecx
movq 40(%rsp), %rdx
movq 48(%rsp), %rsi
movq 56(%rsp), %rdi
call _Z6reducePiS_S_i
movl $0, %esi
movq 72(%rsp), %rdi
call cudaEventRecord@PLT
movq 72(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 32(%rsp), %rdi
movq 72(%rsp), %rdx
movq 64(%rsp), %rsi
call cudaEventElapsedTime@PLT
leaq 36(%rsp), %rdi
movl $2, %ecx
movl $4, %edx
movq 56(%rsp), %rsi
call cudaMemcpy@PLT
leaq .LC6(%rip), %rsi
leaq .LC7(%rip), %rdi
call fopen@PLT
movq %rax, %rbp
testq %rax, %rax
je .L58
movl 36(%rsp), %ecx
leaq .LC9(%rip), %rdx
movl $2, %esi
movq %rax, %rdi
movl $0, %eax
call __fprintf_chk@PLT
movq %rbp, %rdi
call fclose@PLT
movq 56(%rsp), %rdi
call cudaFree@PLT
movq 48(%rsp), %rax
movq %rax, 56(%rsp)
movl %r15d, %edi
call _Z15calc_num_threadi
movl %eax, %ebp
leal -1(%r15,%rax), %eax
cltd
idivl %ebp
movl %eax, %r12d
movl $0, %esi
movq 64(%rsp), %rdi
call cudaEventRecord@PLT
movl %ebp, 92(%rsp)
movl $1, 96(%rsp)
movl %r12d, 80(%rsp)
movl $1, 84(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 92(%rsp), %rdx
movl $1, %ecx
movq 80(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L59
.L47:
movl $0, %esi
movq 72(%rsp), %rdi
call cudaEventRecord@PLT
movq 72(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 32(%rsp), %rdi
movq 72(%rsp), %rdx
movq 64(%rsp), %rsi
call cudaEventElapsedTime@PLT
movq %rbx, %rdi
call malloc@PLT
movq %rax, %r12
movl $2, %ecx
movq %rbx, %rdx
movq 56(%rsp), %rsi
movq %rax, %rdi
call cudaMemcpy@PLT
leaq .LC6(%rip), %rsi
leaq .LC10(%rip), %rdi
call fopen@PLT
movq %rax, %rbp
testq %rax, %rax
je .L48
movl $0, %ebx
leal -1(%r15), %r13d
testl %r15d, %r15d
jg .L49
.L50:
movq %rbp, %rdi
call fclose@PLT
movq 8(%rsp), %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 48(%rsp), %rdi
call cudaFree@PLT
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L60
movl $0, %eax
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L57:
.cfi_restore_state
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L58:
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L59:
movl %r15d, %edx
movq 40(%rsp), %rsi
movq 56(%rsp), %rdi
call _Z41__device_stub__Z17last_digit_kernelPiPKiiPiPKii
jmp .L47
.L48:
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L61:
leaq .LC11(%rip), %rdx
movl $2, %esi
movq %rbp, %rdi
movl $0, %eax
call __fprintf_chk@PLT
.L51:
addq $1, %rbx
cmpq %rbx, %r14
je .L50
.L49:
movl (%r12,%rbx,4), %ecx
leaq .LC9(%rip), %rdx
movl $2, %esi
movq %rbp, %rdi
movl $0, %eax
call __fprintf_chk@PLT
cmpl %ebx, %r13d
jle .L51
jmp .L61
.L60:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2061:
.size main, .-main
.section .rodata.str1.1
.LC12:
.string "_Z17last_digit_kernelPiPKii"
.LC13:
.string "_Z19shmem_reduce_kernelPiPKii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2091:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC12(%rip), %rdx
movq %rdx, %rcx
leaq _Z17last_digit_kernelPiPKii(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC13(%rip), %rdx
movq %rdx, %rcx
leaq _Z19shmem_reduce_kernelPiPKii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2091:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
#define MAX_ARRAY_SIZE 1000000
/*
* Read data from ./inp.txt
* Store the data in (int * data)
* Return the number of elements read into the array
*/
int * read_data(int * size)
{
FILE * fptr = fopen("./inp.txt", "r");
if (!fptr) {
printf("!! Error in opening data file \n");
exit(1);
}
int cur_array_size = MAX_ARRAY_SIZE;
int * buffer = (int *)malloc(cur_array_size * sizeof(int));
int i = 0;
while (!feof(fptr)) {
if (fscanf(fptr, "%d,", &buffer[i]) != 1) {
break;
}
++i;
}
fclose(fptr);
*size = i;
return buffer;
}
/*
* Round up to the nearest power of 2
*/
int round_up_pow2(int val) {
if (val == 0) return 1;
int pow2 = 1;
while (pow2 < val) {
pow2 <<= 1;
}
return pow2;
}
/*
* Calculate the number of threads per block based on array size
* The function is so designed that a reduction on the array can
* be completed in two steps.
* The assumption is that the size of the array is no more than
* 1,000,000, such that the number of threads is no more than
* 1024, which is the computational limit of the GPU device.
*/
int calc_num_thread(int size) {
int approx = (int)sqrt((double)size);
// find the nearest power of 2
return round_up_pow2(approx);
}
/*
* GPU kernel for part a: reduction, getting the min value in a sub-array
*/
__global__ void shmem_reduce_kernel(int * d_out, const int * d_in, const int size)
{
// sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>>
extern __shared__ int sdata[];
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
// load shared mem from global mem
sdata[tid] = d_in[myId];
__syncthreads(); // make sure entire block is loaded!
// do reduction in shared mem
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s && (myId + s) < size)
{
if (sdata[tid] > sdata[tid + s])
sdata[tid] = sdata[tid + s];
}
__syncthreads(); // make sure all adds at one stage are done!
}
// only thread 0 writes result for this block back to global mem
if (tid == 0)
{
d_out[blockIdx.x] = sdata[0];
}
}
/*
* Reduction-based algorithm to find the min value in (int * d_in)
*/
void reduce(int * d_out, int * d_intermediate, int * d_in, int size)
{
// assumes that size is not greater than maxThreadsPerBlock^2
const int maxThreadsPerBlock = calc_num_thread(size);
int threads = maxThreadsPerBlock;
int blocks = (size + maxThreadsPerBlock - 1) / maxThreadsPerBlock;
shmem_reduce_kernel<<<blocks, threads, threads * sizeof(int)>>>(d_intermediate, d_in, size);
// now we're down to one block left, so reduce it
threads = blocks;
blocks = 1;
shmem_reduce_kernel<<<blocks, round_up_pow2(threads), threads * sizeof(int)>>>(d_out, d_intermediate, threads);
}
/*
* GPU kernel for part b: calculate the last digit of each element in the input array in parallel
*/
__global__ void last_digit_kernel(int * d_out, const int * d_in, const int size)
{
int myId = threadIdx.x + blockDim.x * blockIdx.x;
if (myId < size)
d_out[myId] = d_in[myId] % 10;
}
// Driver: part a computes the global minimum on the GPU and writes it to
// ./q1a.txt; part b computes each element's last decimal digit and writes
// the comma-separated list to ./q1b.txt. Input comes from ./inp.txt via
// read_data().
// NOTE(review): cuda* return codes and kernel-launch errors are unchecked;
// a runtime failure surfaces only as wrong file output.
int main(void)
{
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
printf("!! Error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
int dev = 0;
cudaSetDevice(dev);
// data array on host
int array_size = 0;
int * h_in = read_data(&array_size);
int array_byte = array_size * sizeof(int);
// printf(">> Number of data read in: %d\n", array_size);
/*
* part a
*/
// declare GPU memory pointers
int * d_in, * d_intermediate, * d_out;
// allocate GPU memory
cudaMalloc((void **) &d_in, array_byte);
cudaMalloc((void **) &d_intermediate, array_byte);
cudaMalloc((void **) &d_out, sizeof(int));
// transfer the input array to the GPU
cudaMemcpy(d_in, h_in, array_byte, cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// launch the kernel
// (timed with events on the default stream)
cudaEventRecord(start, 0);
reduce(d_out, d_intermediate, d_in, array_size);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
// copy back the min from GPU
// (blocking copy, so no additional synchronization is needed)
int h_out;
cudaMemcpy(&h_out, d_out, sizeof(int), cudaMemcpyDeviceToHost);
// printf(">> Average time elapsed in part a: %f\n", elapsedTime);
// printf(">> Min value returned by device: %d\n", h_out);
// output the result into file
FILE * fptr_a = fopen("./q1a.txt", "w");
if (!fptr_a) {
printf("!! Error in opening output file \n");
exit(1);
}
fprintf(fptr_a, "%d", h_out);
fclose(fptr_a);
// free GPU memory allocation
// reuse d_in for the input array of part b
// reuse d_intermediate for the output array of part b
cudaFree(d_out);
/*
* part b
*/
d_out = d_intermediate;
int numThreadPerBlock = calc_num_thread(array_size);
int numBlock = (array_size + numThreadPerBlock - 1) / numThreadPerBlock;
// launch the kernel
last_digit_kernel<<<numBlock, numThreadPerBlock>>>(d_out, d_in, array_size);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
// printf(">> Average time elapsed of part b: %f\n", elapsedTime);
// copy back the result array from GPU
int * h_out_array = (int *)malloc(array_byte);
cudaMemcpy(h_out_array, d_out, array_byte, cudaMemcpyDeviceToHost);
// output the result array into file
FILE * fptr_b = fopen("./q1b.txt", "w");
if (!fptr_b) {
printf("!! Error in opening output file \n");
exit(1);
}
for (int i = 0; i < array_size; ++i) {
fprintf(fptr_b, "%d", h_out_array[i]);
if (i < array_size - 1)
fprintf(fptr_b, ", ");
}
fclose(fptr_b);
// free CPU memory allocation
free(h_in);
free(h_out_array);
// free GPU memory allocation
// NOTE(review): start/stop events are never destroyed (cudaEventDestroy)
// -- minor resource leak, harmless right before process exit.
cudaFree(d_in);
cudaFree(d_intermediate);
return 0;
}
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#define MAX_ARRAY_SIZE 1000000
/*
* Read data from ./inp.txt
* Store the data in (int * data)
* Return the number of elements read into the array
*/
/*
 * Read whitespace/comma-separated integers from ./inp.txt into a
 * heap-allocated buffer of MAX_ARRAY_SIZE ints (caller frees).
 * The element count is returned through *size.
 */
int * read_data(int * size)
{
    FILE * fptr = fopen("./inp.txt", "r");
    if (!fptr) {
        printf("!! Error in opening data file \n");
        exit(1);
    }
    int cur_array_size = MAX_ARRAY_SIZE;
    int * buffer = (int *)malloc(cur_array_size * sizeof(int));
    if (!buffer) {                       // previously unchecked allocation
        printf("!! Error in allocating data buffer \n");
        exit(1);
    }
    int i = 0;
    // fscanf() itself reports end-of-input, so no feof() pre-check is
    // needed (feof-before-read is the classic anti-pattern). The
    // i < cur_array_size bound prevents a heap overflow when the file
    // holds more than MAX_ARRAY_SIZE values.
    while (i < cur_array_size && fscanf(fptr, "%d,", &buffer[i]) == 1) {
        ++i;
    }
    fclose(fptr);
    *size = i;
    return buffer;
}
/*
* Round up to the nearest power of 2
*/
/* Smallest power of two that is >= val; any val <= 1 yields 1. */
int round_up_pow2(int val) {
    int result = 1;
    while (result < val) {
        result += result;   // doubling == left shift by one
    }
    return result;
}
/*
* Calculate the number of threads per block based on array size
* The function is so designed that a reduction on the array can
* be completed in two steps.
* The assumption is that the size of the array is no more than
* 1,000,000, such that the number of threads is no more than
* 1024, which is the computational limit of the GPU device.
*/
/*
 * Choose a per-block thread count near sqrt(size), rounded up to a power
 * of two, so a full reduction completes in two kernel passes. Assumes
 * size <= 1,000,000 so the result stays within the 1024-thread limit.
 */
int calc_num_thread(int size) {
    double root = sqrt((double)size);
    return round_up_pow2((int)root);
}
/*
* GPU kernel for part a: reduction, getting the min value in a sub-array
*/
/*
 * Block-level min-reduction kernel.
 * Launch with dynamic shared memory of blockDim.x * sizeof(int); the
 * minimum of each block's slice of d_in is written to d_out[blockIdx.x].
 */
__global__ void shmem_reduce_kernel(int * d_out, const int * d_in, const int size)
{
    // sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>>
    extern __shared__ int sdata[];
    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    int tid = threadIdx.x;
    // Guard the global load: the last block may run past the end of the
    // array (the original read d_in[myId] unconditionally -- out-of-bounds).
    // 0x7fffffff (INT_MAX) is the identity for a min-reduction, so padded
    // lanes can never win a comparison.
    sdata[tid] = (myId < size) ? d_in[myId] : 0x7fffffff;
    __syncthreads(); // make sure entire block is loaded!
    // tree reduction in shared memory
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
    {
        if (tid < s && (myId + s) < size)
        {
            if (sdata[tid] > sdata[tid + s])
                sdata[tid] = sdata[tid + s];
        }
        __syncthreads(); // all compares at this stage must finish first
    }
    // only thread 0 writes result for this block back to global mem
    if (tid == 0)
    {
        d_out[blockIdx.x] = sdata[0];
    }
}
/*
* Reduction-based algorithm to find the min value in (int * d_in)
*/
/*
 * Two-pass min-reduction over d_in (size elements); the result lands in
 * *d_out. Pass 1 writes one partial minimum per block into d_intermediate;
 * pass 2 reduces those partials with a single block.
 */
void reduce(int * d_out, int * d_intermediate, int * d_in, int size)
{
    // assumes that size is not greater than maxThreadsPerBlock^2
    const int maxThreadsPerBlock = calc_num_thread(size);
    int threads = maxThreadsPerBlock;
    int blocks = (size + maxThreadsPerBlock - 1) / maxThreadsPerBlock;
    shmem_reduce_kernel<<<blocks, threads, threads * sizeof(int)>>>(d_intermediate, d_in, size);
    // Pass 2: one block over the `blocks` partial results. The launch uses
    // a power-of-two thread count, so the dynamic shared allocation must
    // cover that rounded-up count as well -- the original sized it by
    // `threads` alone, letting lanes past `threads` write sdata[] out of
    // bounds whenever `blocks` was not already a power of two.
    threads = blocks;
    blocks = 1;
    int threads_pow2 = round_up_pow2(threads);
    shmem_reduce_kernel<<<blocks, threads_pow2, threads_pow2 * sizeof(int)>>>(d_out, d_intermediate, threads);
}
/*
* GPU kernel for part b: calculate the last digit of each element in the input array in parallel
*/
/*
 * Store the last decimal digit (d_in[i] % 10) of every input element.
 * One thread per element; threads past `size` exit immediately.
 */
__global__ void last_digit_kernel(int * d_out, const int * d_in, const int size)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= size)
        return;
    d_out[idx] = d_in[idx] % 10;
}
// Driver: part a computes the global minimum on the GPU and writes it to
// ./q1a.txt; part b computes each element's last decimal digit and writes
// the comma-separated list to ./q1b.txt. Input comes from ./inp.txt via
// read_data().
// NOTE(review): hip* return codes and kernel-launch errors are unchecked;
// a runtime failure surfaces only as wrong file output.
int main(void)
{
int deviceCount;
hipGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
printf("!! Error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
int dev = 0;
hipSetDevice(dev);
// data array on host
int array_size = 0;
int * h_in = read_data(&array_size);
int array_byte = array_size * sizeof(int);
// printf(">> Number of data read in: %d\n", array_size);
/*
* part a
*/
// declare GPU memory pointers
int * d_in, * d_intermediate, * d_out;
// allocate GPU memory
hipMalloc((void **) &d_in, array_byte);
hipMalloc((void **) &d_intermediate, array_byte);
hipMalloc((void **) &d_out, sizeof(int));
// transfer the input array to the GPU
hipMemcpy(d_in, h_in, array_byte, hipMemcpyHostToDevice);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// launch the kernel
// (timed with events on the default stream)
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_in, array_size);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
// copy back the min from GPU
// (blocking copy, so no additional synchronization is needed)
int h_out;
hipMemcpy(&h_out, d_out, sizeof(int), hipMemcpyDeviceToHost);
// printf(">> Average time elapsed in part a: %f\n", elapsedTime);
// printf(">> Min value returned by device: %d\n", h_out);
// output the result into file
FILE * fptr_a = fopen("./q1a.txt", "w");
if (!fptr_a) {
printf("!! Error in opening output file \n");
exit(1);
}
fprintf(fptr_a, "%d", h_out);
fclose(fptr_a);
// free GPU memory allocation
// reuse d_in for the input array of part b
// reuse d_intermediate for the output array of part b
hipFree(d_out);
/*
* part b
*/
d_out = d_intermediate;
int numThreadPerBlock = calc_num_thread(array_size);
int numBlock = (array_size + numThreadPerBlock - 1) / numThreadPerBlock;
// launch the kernel
hipEventRecord(start, 0);
last_digit_kernel<<<numBlock, numThreadPerBlock>>>(d_out, d_in, array_size);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
// printf(">> Average time elapsed of part b: %f\n", elapsedTime);
// copy back the result array from GPU
int * h_out_array = (int *)malloc(array_byte);
hipMemcpy(h_out_array, d_out, array_byte, hipMemcpyDeviceToHost);
// output the result array into file
FILE * fptr_b = fopen("./q1b.txt", "w");
if (!fptr_b) {
printf("!! Error in opening output file \n");
exit(1);
}
for (int i = 0; i < array_size; ++i) {
fprintf(fptr_b, "%d", h_out_array[i]);
if (i < array_size - 1)
fprintf(fptr_b, ", ");
}
fclose(fptr_b);
// free CPU memory allocation
free(h_in);
free(h_out_array);
// free GPU memory allocation
// NOTE(review): start/stop events are never destroyed (hipEventDestroy)
// -- minor resource leak, harmless right before process exit.
hipFree(d_in);
hipFree(d_intermediate);
return 0;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#define MAX_ARRAY_SIZE 1000000
/*
* Read data from ./inp.txt
* Store the data in (int * data)
* Return the number of elements read into the array
*/
/*
 * Read whitespace/comma-separated integers from ./inp.txt into a
 * heap-allocated buffer of MAX_ARRAY_SIZE ints (caller frees).
 * The element count is returned through *size.
 */
int * read_data(int * size)
{
    FILE * fptr = fopen("./inp.txt", "r");
    if (!fptr) {
        printf("!! Error in opening data file \n");
        exit(1);
    }
    int cur_array_size = MAX_ARRAY_SIZE;
    int * buffer = (int *)malloc(cur_array_size * sizeof(int));
    if (!buffer) {                       // previously unchecked allocation
        printf("!! Error in allocating data buffer \n");
        exit(1);
    }
    int i = 0;
    // fscanf() itself reports end-of-input, so no feof() pre-check is
    // needed (feof-before-read is the classic anti-pattern). The
    // i < cur_array_size bound prevents a heap overflow when the file
    // holds more than MAX_ARRAY_SIZE values.
    while (i < cur_array_size && fscanf(fptr, "%d,", &buffer[i]) == 1) {
        ++i;
    }
    fclose(fptr);
    *size = i;
    return buffer;
}
/*
* Round up to the nearest power of 2
*/
/* Smallest power of two that is >= val; any val <= 1 yields 1. */
int round_up_pow2(int val) {
    int result = 1;
    while (result < val) {
        result += result;   // doubling == left shift by one
    }
    return result;
}
/*
* Calculate the number of threads per block based on array size
* The function is so designed that a reduction on the array can
* be completed in two steps.
* The assumption is that the size of the array is no more than
* 1,000,000, such that the number of threads is no more than
* 1024, which is the computational limit of the GPU device.
*/
/*
 * Choose a per-block thread count near sqrt(size), rounded up to a power
 * of two, so a full reduction completes in two kernel passes. Assumes
 * size <= 1,000,000 so the result stays within the 1024-thread limit.
 */
int calc_num_thread(int size) {
    double root = sqrt((double)size);
    return round_up_pow2((int)root);
}
/*
* GPU kernel for part a: reduction, getting the min value in a sub-array
*/
/*
 * Block-level min-reduction kernel.
 * Launch with dynamic shared memory of blockDim.x * sizeof(int); the
 * minimum of each block's slice of d_in is written to d_out[blockIdx.x].
 */
__global__ void shmem_reduce_kernel(int * d_out, const int * d_in, const int size)
{
    // sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>>
    extern __shared__ int sdata[];
    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    int tid = threadIdx.x;
    // Guard the global load: the last block may run past the end of the
    // array (the original read d_in[myId] unconditionally -- out-of-bounds).
    // 0x7fffffff (INT_MAX) is the identity for a min-reduction, so padded
    // lanes can never win a comparison.
    sdata[tid] = (myId < size) ? d_in[myId] : 0x7fffffff;
    __syncthreads(); // make sure entire block is loaded!
    // tree reduction in shared memory
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
    {
        if (tid < s && (myId + s) < size)
        {
            if (sdata[tid] > sdata[tid + s])
                sdata[tid] = sdata[tid + s];
        }
        __syncthreads(); // all compares at this stage must finish first
    }
    // only thread 0 writes result for this block back to global mem
    if (tid == 0)
    {
        d_out[blockIdx.x] = sdata[0];
    }
}
/*
* Reduction-based algorithm to find the min value in (int * d_in)
*/
/*
 * Two-pass min-reduction over d_in (size elements); the result lands in
 * *d_out. Pass 1 writes one partial minimum per block into d_intermediate;
 * pass 2 reduces those partials with a single block.
 */
void reduce(int * d_out, int * d_intermediate, int * d_in, int size)
{
    // assumes that size is not greater than maxThreadsPerBlock^2
    const int maxThreadsPerBlock = calc_num_thread(size);
    int threads = maxThreadsPerBlock;
    int blocks = (size + maxThreadsPerBlock - 1) / maxThreadsPerBlock;
    shmem_reduce_kernel<<<blocks, threads, threads * sizeof(int)>>>(d_intermediate, d_in, size);
    // Pass 2: one block over the `blocks` partial results. The launch uses
    // a power-of-two thread count, so the dynamic shared allocation must
    // cover that rounded-up count as well -- the original sized it by
    // `threads` alone, letting lanes past `threads` write sdata[] out of
    // bounds whenever `blocks` was not already a power of two.
    threads = blocks;
    blocks = 1;
    int threads_pow2 = round_up_pow2(threads);
    shmem_reduce_kernel<<<blocks, threads_pow2, threads_pow2 * sizeof(int)>>>(d_out, d_intermediate, threads);
}
/*
* GPU kernel for part b: calculate the last digit of each element in the input array in parallel
*/
/*
 * Store the last decimal digit (d_in[i] % 10) of every input element.
 * One thread per element; threads past `size` exit immediately.
 */
__global__ void last_digit_kernel(int * d_out, const int * d_in, const int size)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= size)
        return;
    d_out[idx] = d_in[idx] % 10;
}
// Driver: part a computes the global minimum on the GPU and writes it to
// ./q1a.txt; part b computes each element's last decimal digit and writes
// the comma-separated list to ./q1b.txt. Input comes from ./inp.txt via
// read_data().
// NOTE(review): hip* return codes and kernel-launch errors are unchecked;
// a runtime failure surfaces only as wrong file output.
int main(void)
{
int deviceCount;
hipGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
printf("!! Error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
int dev = 0;
hipSetDevice(dev);
// data array on host
int array_size = 0;
int * h_in = read_data(&array_size);
int array_byte = array_size * sizeof(int);
// printf(">> Number of data read in: %d\n", array_size);
/*
* part a
*/
// declare GPU memory pointers
int * d_in, * d_intermediate, * d_out;
// allocate GPU memory
hipMalloc((void **) &d_in, array_byte);
hipMalloc((void **) &d_intermediate, array_byte);
hipMalloc((void **) &d_out, sizeof(int));
// transfer the input array to the GPU
hipMemcpy(d_in, h_in, array_byte, hipMemcpyHostToDevice);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// launch the kernel
// (timed with events on the default stream)
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_in, array_size);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
// copy back the min from GPU
// (blocking copy, so no additional synchronization is needed)
int h_out;
hipMemcpy(&h_out, d_out, sizeof(int), hipMemcpyDeviceToHost);
// printf(">> Average time elapsed in part a: %f\n", elapsedTime);
// printf(">> Min value returned by device: %d\n", h_out);
// output the result into file
FILE * fptr_a = fopen("./q1a.txt", "w");
if (!fptr_a) {
printf("!! Error in opening output file \n");
exit(1);
}
fprintf(fptr_a, "%d", h_out);
fclose(fptr_a);
// free GPU memory allocation
// reuse d_in for the input array of part b
// reuse d_intermediate for the output array of part b
hipFree(d_out);
/*
* part b
*/
d_out = d_intermediate;
int numThreadPerBlock = calc_num_thread(array_size);
int numBlock = (array_size + numThreadPerBlock - 1) / numThreadPerBlock;
// launch the kernel
hipEventRecord(start, 0);
last_digit_kernel<<<numBlock, numThreadPerBlock>>>(d_out, d_in, array_size);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
// printf(">> Average time elapsed of part b: %f\n", elapsedTime);
// copy back the result array from GPU
int * h_out_array = (int *)malloc(array_byte);
hipMemcpy(h_out_array, d_out, array_byte, hipMemcpyDeviceToHost);
// output the result array into file
FILE * fptr_b = fopen("./q1b.txt", "w");
if (!fptr_b) {
printf("!! Error in opening output file \n");
exit(1);
}
for (int i = 0; i < array_size; ++i) {
fprintf(fptr_b, "%d", h_out_array[i]);
if (i < array_size - 1)
fprintf(fptr_b, ", ");
}
fclose(fptr_b);
// free CPU memory allocation
free(h_in);
free(h_out_array);
// free GPU memory allocation
// NOTE(review): start/stop events are never destroyed (hipEventDestroy)
// -- minor resource leak, harmless right before process exit.
hipFree(d_in);
hipFree(d_intermediate);
return 0;
}
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z19shmem_reduce_kernelPiPKii
.globl _Z19shmem_reduce_kernelPiPKii
.p2align 8
.type _Z19shmem_reduce_kernelPiPKii,@function
_Z19shmem_reduce_kernelPiPKii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b64 s[6:7], s[0:1], 0x8
s_mov_b32 s4, s15
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s4, s3, v[0:1]
s_cmp_lt_u32 s3, 2
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
v_add_co_u32 v2, vcc_lo, s6, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s7, v3, vcc_lo
global_load_b32 v3, v[2:3], off
v_lshl_add_u32 v2, v0, 2, 0
s_waitcnt vmcnt(0)
ds_store_b32 v2, v3
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_cbranch_scc1 .LBB0_6
s_load_b32 s5, s[0:1], 0x10
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_3
.p2align 6
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s2
s_cmp_lt_u32 s6, 4
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_cbranch_scc1 .LBB0_6
.LBB0_3:
s_mov_b32 s6, s3
s_lshr_b32 s3, s3, 1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v3, s3, v1
v_cmp_gt_u32_e32 vcc_lo, s3, v0
s_waitcnt lgkmcnt(0)
v_cmp_gt_u32_e64 s2, s5, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s7, vcc_lo, s2
s_and_saveexec_b32 s2, s7
s_cbranch_execz .LBB0_2
v_add_nc_u32_e32 v3, s3, v0
s_delay_alu instid0(VALU_DEP_1)
v_lshl_add_u32 v3, v3, 2, 0
ds_load_b32 v4, v2
ds_load_b32 v3, v3
s_waitcnt lgkmcnt(0)
v_cmp_gt_i32_e32 vcc_lo, v4, v3
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_2
ds_store_b32 v2, v3
s_branch .LBB0_2
.LBB0_6:
s_set_inst_prefetch_distance 0x2
s_mov_b32 s2, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB0_8
v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0
s_load_b64 s[0:1], s[0:1], 0x0
s_mov_b32 s5, 0
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[2:3], s[4:5], 2
ds_load_b32 v0, v0
s_waitcnt lgkmcnt(0)
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
global_store_b32 v1, v0, s[0:1]
.LBB0_8:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z19shmem_reduce_kernelPiPKii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z19shmem_reduce_kernelPiPKii, .Lfunc_end0-_Z19shmem_reduce_kernelPiPKii
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z17last_digit_kernelPiPKii
.globl _Z17last_digit_kernelPiPKii
.p2align 8
.type _Z17last_digit_kernelPiPKii,@function
_Z17last_digit_kernelPiPKii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB1_2
s_load_b128 s[0:3], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_load_b32 v2, v[2:3], off
s_waitcnt vmcnt(0)
v_mul_hi_i32 v3, v2, 0x66666667
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshrrev_b32_e32 v4, 31, v3
v_ashrrev_i32_e32 v3, 2, v3
v_add_nc_u32_e32 v3, v3, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v3, v3, 10
v_sub_nc_u32_e32 v2, v2, v3
global_store_b32 v[0:1], v2, off
.LBB1_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z17last_digit_kernelPiPKii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z17last_digit_kernelPiPKii, .Lfunc_end1-_Z17last_digit_kernelPiPKii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
- .offset: 144
.size: 4
.value_kind: hidden_dynamic_lds_size
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z19shmem_reduce_kernelPiPKii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z19shmem_reduce_kernelPiPKii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z17last_digit_kernelPiPKii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z17last_digit_kernelPiPKii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#define MAX_ARRAY_SIZE 1000000
/*
* Read data from ./inp.txt
* Store the data in (int * data)
* Return the number of elements read into the array
*/
/*
 * Read whitespace/comma-separated integers from ./inp.txt into a
 * heap-allocated buffer of MAX_ARRAY_SIZE ints (caller frees).
 * The element count is returned through *size.
 */
int * read_data(int * size)
{
    FILE * fptr = fopen("./inp.txt", "r");
    if (!fptr) {
        printf("!! Error in opening data file \n");
        exit(1);
    }
    int cur_array_size = MAX_ARRAY_SIZE;
    int * buffer = (int *)malloc(cur_array_size * sizeof(int));
    if (!buffer) {                       // previously unchecked allocation
        printf("!! Error in allocating data buffer \n");
        exit(1);
    }
    int i = 0;
    // fscanf() itself reports end-of-input, so no feof() pre-check is
    // needed (feof-before-read is the classic anti-pattern). The
    // i < cur_array_size bound prevents a heap overflow when the file
    // holds more than MAX_ARRAY_SIZE values.
    while (i < cur_array_size && fscanf(fptr, "%d,", &buffer[i]) == 1) {
        ++i;
    }
    fclose(fptr);
    *size = i;
    return buffer;
}
/*
* Round up to the nearest power of 2
*/
/* Smallest power of two that is >= val; any val <= 1 yields 1. */
int round_up_pow2(int val) {
    int result = 1;
    while (result < val) {
        result += result;   // doubling == left shift by one
    }
    return result;
}
/*
* Calculate the number of threads per block based on array size
* The function is so designed that a reduction on the array can
* be completed in two steps.
* The assumption is that the size of the array is no more than
* 1,000,000, such that the number of threads is no more than
* 1024, which is the computational limit of the GPU device.
*/
/*
 * Choose a per-block thread count near sqrt(size), rounded up to a power
 * of two, so a full reduction completes in two kernel passes. Assumes
 * size <= 1,000,000 so the result stays within the 1024-thread limit.
 */
int calc_num_thread(int size) {
    double root = sqrt((double)size);
    return round_up_pow2((int)root);
}
/*
* GPU kernel for part a: reduction, getting the min value in a sub-array
*/
/*
 * Block-level min-reduction kernel.
 * Launch with dynamic shared memory of blockDim.x * sizeof(int); the
 * minimum of each block's slice of d_in is written to d_out[blockIdx.x].
 */
__global__ void shmem_reduce_kernel(int * d_out, const int * d_in, const int size)
{
    // sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>>
    extern __shared__ int sdata[];
    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    int tid = threadIdx.x;
    // Guard the global load: the last block may run past the end of the
    // array (the original read d_in[myId] unconditionally -- out-of-bounds).
    // 0x7fffffff (INT_MAX) is the identity for a min-reduction, so padded
    // lanes can never win a comparison.
    sdata[tid] = (myId < size) ? d_in[myId] : 0x7fffffff;
    __syncthreads(); // make sure entire block is loaded!
    // tree reduction in shared memory
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
    {
        if (tid < s && (myId + s) < size)
        {
            if (sdata[tid] > sdata[tid + s])
                sdata[tid] = sdata[tid + s];
        }
        __syncthreads(); // all compares at this stage must finish first
    }
    // only thread 0 writes result for this block back to global mem
    if (tid == 0)
    {
        d_out[blockIdx.x] = sdata[0];
    }
}
/*
* Reduction-based algorithm to find the min value in (int * d_in)
*/
/*
 * Two-pass min-reduction over d_in (size elements); the result lands in
 * *d_out. Pass 1 writes one partial minimum per block into d_intermediate;
 * pass 2 reduces those partials with a single block.
 */
void reduce(int * d_out, int * d_intermediate, int * d_in, int size)
{
    // assumes that size is not greater than maxThreadsPerBlock^2
    const int maxThreadsPerBlock = calc_num_thread(size);
    int threads = maxThreadsPerBlock;
    int blocks = (size + maxThreadsPerBlock - 1) / maxThreadsPerBlock;
    shmem_reduce_kernel<<<blocks, threads, threads * sizeof(int)>>>(d_intermediate, d_in, size);
    // Pass 2: one block over the `blocks` partial results. The launch uses
    // a power-of-two thread count, so the dynamic shared allocation must
    // cover that rounded-up count as well -- the original sized it by
    // `threads` alone, letting lanes past `threads` write sdata[] out of
    // bounds whenever `blocks` was not already a power of two.
    threads = blocks;
    blocks = 1;
    int threads_pow2 = round_up_pow2(threads);
    shmem_reduce_kernel<<<blocks, threads_pow2, threads_pow2 * sizeof(int)>>>(d_out, d_intermediate, threads);
}
/*
* GPU kernel for part b: calculate the last digit of each element in the input array in parallel
*/
/*
 * Store the last decimal digit (d_in[i] % 10) of every input element.
 * One thread per element; threads past `size` exit immediately.
 */
__global__ void last_digit_kernel(int * d_out, const int * d_in, const int size)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= size)
        return;
    d_out[idx] = d_in[idx] % 10;
}
// Driver: part a computes the global minimum on the GPU and writes it to
// ./q1a.txt; part b computes each element's last decimal digit and writes
// the comma-separated list to ./q1b.txt. Input comes from ./inp.txt via
// read_data().
// NOTE(review): hip* return codes and kernel-launch errors are unchecked;
// a runtime failure surfaces only as wrong file output.
int main(void)
{
int deviceCount;
hipGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
printf("!! Error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
int dev = 0;
hipSetDevice(dev);
// data array on host
int array_size = 0;
int * h_in = read_data(&array_size);
int array_byte = array_size * sizeof(int);
// printf(">> Number of data read in: %d\n", array_size);
/*
* part a
*/
// declare GPU memory pointers
int * d_in, * d_intermediate, * d_out;
// allocate GPU memory
hipMalloc((void **) &d_in, array_byte);
hipMalloc((void **) &d_intermediate, array_byte);
hipMalloc((void **) &d_out, sizeof(int));
// transfer the input array to the GPU
hipMemcpy(d_in, h_in, array_byte, hipMemcpyHostToDevice);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// launch the kernel
// (timed with events on the default stream)
hipEventRecord(start, 0);
reduce(d_out, d_intermediate, d_in, array_size);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
// copy back the min from GPU
// (blocking copy, so no additional synchronization is needed)
int h_out;
hipMemcpy(&h_out, d_out, sizeof(int), hipMemcpyDeviceToHost);
// printf(">> Average time elapsed in part a: %f\n", elapsedTime);
// printf(">> Min value returned by device: %d\n", h_out);
// output the result into file
FILE * fptr_a = fopen("./q1a.txt", "w");
if (!fptr_a) {
printf("!! Error in opening output file \n");
exit(1);
}
fprintf(fptr_a, "%d", h_out);
fclose(fptr_a);
// free GPU memory allocation
// reuse d_in for the input array of part b
// reuse d_intermediate for the output array of part b
hipFree(d_out);
/*
* part b
*/
d_out = d_intermediate;
int numThreadPerBlock = calc_num_thread(array_size);
int numBlock = (array_size + numThreadPerBlock - 1) / numThreadPerBlock;
// launch the kernel
hipEventRecord(start, 0);
last_digit_kernel<<<numBlock, numThreadPerBlock>>>(d_out, d_in, array_size);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
// printf(">> Average time elapsed of part b: %f\n", elapsedTime);
// copy back the result array from GPU
int * h_out_array = (int *)malloc(array_byte);
hipMemcpy(h_out_array, d_out, array_byte, hipMemcpyDeviceToHost);
// output the result array into file
FILE * fptr_b = fopen("./q1b.txt", "w");
if (!fptr_b) {
printf("!! Error in opening output file \n");
exit(1);
}
for (int i = 0; i < array_size; ++i) {
fprintf(fptr_b, "%d", h_out_array[i]);
if (i < array_size - 1)
fprintf(fptr_b, ", ");
}
fclose(fptr_b);
// free CPU memory allocation
free(h_in);
free(h_out_array);
// free GPU memory allocation
// NOTE(review): start/stop events are never destroyed (hipEventDestroy)
// -- minor resource leak, harmless right before process exit.
hipFree(d_in);
hipFree(d_intermediate);
return 0;
}
.file "q1.hip"
.globl _Z9read_dataPi # -- Begin function _Z9read_dataPi
.p2align 4, 0x90
.type _Z9read_dataPi,@function
_Z9read_dataPi: # @_Z9read_dataPi
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
movl $.L.str, %edi
movl $.L.str.1, %esi
callq fopen
testq %rax, %rax
je .LBB0_6
# %bb.1:
movq %rax, %r14
movl $4000000, %edi # imm = 0x3D0900
callq malloc
movq %rax, %r15
movq %r14, %rdi
callq feof
xorl %r13d, %r13d
testl %eax, %eax
jne .LBB0_5
# %bb.2: # %.lr.ph.preheader
movq %r15, %r12
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB0_3: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl $.L.str.3, %esi
movq %r14, %rdi
movq %r12, %rdx
xorl %eax, %eax
callq __isoc23_fscanf
cmpl $1, %eax
jne .LBB0_5
# %bb.4: # in Loop: Header=BB0_3 Depth=1
incq %r13
movq %r14, %rdi
callq feof
addq $4, %r12
testl %eax, %eax
je .LBB0_3
.LBB0_5: # %._crit_edge
movq %r14, %rdi
callq fclose
movl %r13d, (%rbx)
movq %r15, %rax
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB0_6:
.cfi_def_cfa_offset 48
movl $.Lstr, %edi
callq puts@PLT
movl $1, %edi
callq exit
.Lfunc_end0:
.size _Z9read_dataPi, .Lfunc_end0-_Z9read_dataPi
.cfi_endproc
# -- End function
.globl _Z13round_up_pow2i # -- Begin function _Z13round_up_pow2i
.p2align 4, 0x90
.type _Z13round_up_pow2i,@function
_Z13round_up_pow2i: # @_Z13round_up_pow2i
.cfi_startproc
# %bb.0:
testl %edi, %edi
je .LBB1_1
# %bb.2: # %.preheader.preheader
movl $1, %ecx
.p2align 4, 0x90
.LBB1_3: # %.preheader
# =>This Inner Loop Header: Depth=1
movl %ecx, %eax
leal (%rax,%rax), %ecx
cmpl %edi, %eax
jl .LBB1_3
# %bb.4: # %.loopexit
# kill: def $eax killed $eax killed $rax
retq
.LBB1_1:
movl $1, %eax
# kill: def $eax killed $eax killed $rax
retq
.Lfunc_end1:
.size _Z13round_up_pow2i, .Lfunc_end1-_Z13round_up_pow2i
.cfi_endproc
# -- End function
.globl _Z15calc_num_threadi # -- Begin function _Z15calc_num_threadi
.p2align 4, 0x90
.type _Z15calc_num_threadi,@function
_Z15calc_num_threadi: # @_Z15calc_num_threadi
.cfi_startproc
# %bb.0:
cvtsi2sd %edi, %xmm0
xorpd %xmm1, %xmm1
ucomisd %xmm1, %xmm0
jb .LBB2_2
# %bb.1:
sqrtsd %xmm0, %xmm0
jmp .LBB2_3
.LBB2_2: # %call.sqrt
pushq %rax
.cfi_def_cfa_offset 16
callq sqrt
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_3: # %.split
cvttsd2si %xmm0, %ecx
testl %ecx, %ecx
je .LBB2_4
# %bb.5: # %.preheader.i.preheader
movl $1, %edx
.p2align 4, 0x90
.LBB2_6: # %.preheader.i
# =>This Inner Loop Header: Depth=1
movl %edx, %eax
leal (%rax,%rax), %edx
cmpl %ecx, %eax
jl .LBB2_6
# %bb.7: # %_Z13round_up_pow2i.exit
# kill: def $eax killed $eax killed $rax
retq
.LBB2_4:
movl $1, %eax
# kill: def $eax killed $eax killed $rax
retq
.Lfunc_end2:
.size _Z15calc_num_threadi, .Lfunc_end2-_Z15calc_num_threadi
.cfi_endproc
# -- End function
.globl _Z34__device_stub__shmem_reduce_kernelPiPKii # -- Begin function _Z34__device_stub__shmem_reduce_kernelPiPKii
.p2align 4, 0x90
.type _Z34__device_stub__shmem_reduce_kernelPiPKii,@function
_Z34__device_stub__shmem_reduce_kernelPiPKii: # @_Z34__device_stub__shmem_reduce_kernelPiPKii
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z19shmem_reduce_kernelPiPKii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end3:
.size _Z34__device_stub__shmem_reduce_kernelPiPKii, .Lfunc_end3-_Z34__device_stub__shmem_reduce_kernelPiPKii
.cfi_endproc
# -- End function
.globl _Z6reducePiS_S_i # -- Begin function _Z6reducePiS_S_i
.p2align 4, 0x90
.type _Z6reducePiS_S_i,@function
_Z6reducePiS_S_i: # @_Z6reducePiS_S_i
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $104, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %ecx, %r12d
movq %rdx, %r13
movq %rsi, %rbx
cvtsi2sd %ecx, %xmm0
movq %rdi, %r14
xorpd %xmm1, %xmm1
ucomisd %xmm1, %xmm0
jb .LBB4_2
# %bb.1:
sqrtsd %xmm0, %xmm0
jmp .LBB4_3
.LBB4_2: # %call.sqrt
callq sqrt
.LBB4_3: # %.split
cvttsd2si %xmm0, %eax
testl %eax, %eax
je .LBB4_4
# %bb.5: # %.preheader.i.i.preheader
movl $1, %edx
.p2align 4, 0x90
.LBB4_6: # %.preheader.i.i
# =>This Inner Loop Header: Depth=1
movl %edx, %ecx
leal (%rcx,%rcx), %edx
cmpl %eax, %ecx
jl .LBB4_6
jmp .LBB4_7
.LBB4_4:
movl $1, %ecx
.LBB4_7: # %_Z15calc_num_threadi.exit
movabsq $4294967296, %rbp # imm = 0x100000000
leal (%r12,%rcx), %eax
decl %eax
cltd
idivl %ecx
movl %eax, %r15d
movslq %ecx, %rax
leaq (,%rax,4), %r8
leaq (%r15,%rbp), %rdi
movl %eax, %edx
orq %rbp, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_9
# %bb.8:
movq %rbx, 72(%rsp)
movq %r13, 64(%rsp)
movl %r12d, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z19shmem_reduce_kernelPiPKii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_9:
leaq 1(%rbp), %rdi
movq %rdi, %rdx
testl %r15d, %r15d
je .LBB4_13
# %bb.10: # %.preheader.i.preheader
movl $1, %ecx
.p2align 4, 0x90
.LBB4_11: # %.preheader.i
# =>This Inner Loop Header: Depth=1
movl %ecx, %eax
leal (%rax,%rax), %ecx
cmpl %r15d, %eax
jl .LBB4_11
# %bb.12: # %_Z13round_up_pow2i.exit.loopexit
movl %eax, %edx
orq %rbp, %rdx
.LBB4_13: # %_Z13round_up_pow2i.exit
movslq %r15d, %r8
shlq $2, %r8
movl $1, %esi
movl $1, %ecx
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_15
# %bb.14:
movq %r14, 72(%rsp)
movq %rbx, 64(%rsp)
movl %r15d, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z19shmem_reduce_kernelPiPKii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_15:
addq $104, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end4:
.size _Z6reducePiS_S_i, .Lfunc_end4-_Z6reducePiS_S_i
.cfi_endproc
# -- End function
.globl _Z32__device_stub__last_digit_kernelPiPKii # -- Begin function _Z32__device_stub__last_digit_kernelPiPKii
.p2align 4, 0x90
.type _Z32__device_stub__last_digit_kernelPiPKii,@function
_Z32__device_stub__last_digit_kernelPiPKii: # @_Z32__device_stub__last_digit_kernelPiPKii
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z17last_digit_kernelPiPKii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end5:
.size _Z32__device_stub__last_digit_kernelPiPKii, .Lfunc_end5-_Z32__device_stub__last_digit_kernelPiPKii
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $152, %rsp
.cfi_def_cfa_offset 208
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
leaq 56(%rsp), %rdi
callq hipGetDeviceCount
cmpl $0, 56(%rsp)
je .LBB6_1
# %bb.3:
xorl %edi, %edi
callq hipSetDevice
movl $0, 20(%rsp)
leaq 20(%rsp), %rdi
callq _Z9read_dataPi
movq %rax, %rbx
movl 20(%rsp), %r14d
leal (,%r14,4), %eax
movslq %eax, %r15
leaq 32(%rsp), %rdi
movq %r15, %rsi
callq hipMalloc
leaq 40(%rsp), %rdi
movq %r15, %rsi
callq hipMalloc
leaq 8(%rsp), %rdi
movl $4, %esi
callq hipMalloc
movq 32(%rsp), %rdi
movq %rbx, %rsi
movq %r15, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 24(%rsp), %rdi
callq hipEventCreate
movq %rsp, %rdi
callq hipEventCreate
movq 24(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 8(%rsp), %rdi
movq 40(%rsp), %rsi
movq 32(%rsp), %rdx
movl %r14d, %ecx
callq _Z6reducePiS_S_i
movq (%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq (%rsp), %rdi
callq hipEventSynchronize
movq 24(%rsp), %rsi
movq (%rsp), %rdx
leaq 52(%rsp), %rdi
callq hipEventElapsedTime
movq 8(%rsp), %rsi
leaq 48(%rsp), %rdi
movl $4, %edx
movl $2, %ecx
callq hipMemcpy
movl $.L.str.5, %edi
movl $.L.str.6, %esi
callq fopen
testq %rax, %rax
je .LBB6_4
# %bb.5:
movq %rax, %r12
movl 48(%rsp), %edx
movl $.L.str.8, %esi
movq %rax, %rdi
xorl %eax, %eax
callq fprintf
movq %r12, %rdi
callq fclose
movq 8(%rsp), %rdi
callq hipFree
movq 40(%rsp), %rax
cvtsi2sd %r14d, %xmm0
movq %rax, 8(%rsp)
xorpd %xmm1, %xmm1
ucomisd %xmm1, %xmm0
jb .LBB6_7
# %bb.6:
sqrtsd %xmm0, %xmm0
jmp .LBB6_8
.LBB6_7: # %call.sqrt
callq sqrt
.LBB6_8: # %.split
cvttsd2si %xmm0, %eax
testl %eax, %eax
je .LBB6_9
# %bb.10: # %.preheader.i.i.preheader
movl $1, %ecx
.p2align 4, 0x90
.LBB6_11: # %.preheader.i.i
# =>This Inner Loop Header: Depth=1
movl %ecx, %r13d
leal (,%r13,2), %ecx
cmpl %eax, %r13d
jl .LBB6_11
jmp .LBB6_12
.LBB6_9:
movl $1, %r13d
.LBB6_12: # %_Z15calc_num_threadi.exit
leal (%r14,%r13), %eax
decl %eax
cltd
idivl %r13d
movl %eax, %r12d
movq 24(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movabsq $4294967296, %rax # imm = 0x100000000
orq %rax, %r12
movl %r13d, %edx
orq %rax, %rdx
movq %r12, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB6_14
# %bb.13:
movq 8(%rsp), %rax
movq 32(%rsp), %rcx
movq %rax, 120(%rsp)
movq %rcx, 112(%rsp)
movl %r14d, 60(%rsp)
leaq 120(%rsp), %rax
movq %rax, 128(%rsp)
leaq 112(%rsp), %rax
movq %rax, 136(%rsp)
leaq 60(%rsp), %rax
movq %rax, 144(%rsp)
leaq 96(%rsp), %rdi
leaq 80(%rsp), %rsi
leaq 72(%rsp), %rdx
leaq 64(%rsp), %rcx
callq __hipPopCallConfiguration
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
movq 80(%rsp), %rcx
movl 88(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z17last_digit_kernelPiPKii, %edi
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
pushq 80(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB6_14:
movq (%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq (%rsp), %rdi
callq hipEventSynchronize
movq 24(%rsp), %rsi
movq (%rsp), %rdx
leaq 52(%rsp), %rdi
callq hipEventElapsedTime
movq %r15, %rdi
callq malloc
movq %rax, %r12
movq 8(%rsp), %rsi
movq %rax, %rdi
movq %r15, %rdx
movl $2, %ecx
callq hipMemcpy
movl $.L.str.9, %edi
movl $.L.str.6, %esi
callq fopen
testq %rax, %rax
je .LBB6_4
# %bb.15: # %.preheader
movq %rax, %r15
testl %r14d, %r14d
jle .LBB6_20
# %bb.16: # %.lr.ph
movslq %r14d, %r13
decq %r13
xorl %ebp, %ebp
jmp .LBB6_17
.p2align 4, 0x90
.LBB6_19: # in Loop: Header=BB6_17 Depth=1
incq %rbp
cmpq %rbp, %r14
je .LBB6_20
.LBB6_17: # =>This Inner Loop Header: Depth=1
movl (%r12,%rbp,4), %edx
movl $.L.str.8, %esi
movq %r15, %rdi
xorl %eax, %eax
callq fprintf
cmpq %r13, %rbp
jge .LBB6_19
# %bb.18: # in Loop: Header=BB6_17 Depth=1
movl $.L.str.10, %edi
movl $2, %esi
movl $1, %edx
movq %r15, %rcx
callq fwrite@PLT
jmp .LBB6_19
.LBB6_20: # %._crit_edge
movq %r15, %rdi
callq fclose
movq %rbx, %rdi
callq free
movq %r12, %rdi
callq free
movq 32(%rsp), %rdi
callq hipFree
movq 40(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $152, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB6_4:
.cfi_def_cfa_offset 208
movl $.Lstr.2, %edi
jmp .LBB6_2
.LBB6_1:
movl $.Lstr.3, %edi
.LBB6_2:
callq puts@PLT
movl $1, %edi
callq exit
.Lfunc_end6:
.size main, .Lfunc_end6-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB7_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB7_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z19shmem_reduce_kernelPiPKii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z17last_digit_kernelPiPKii, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end7:
.size __hip_module_ctor, .Lfunc_end7-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB8_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB8_2:
retq
.Lfunc_end8:
.size __hip_module_dtor, .Lfunc_end8-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "./inp.txt"
.size .L.str, 10
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "r"
.size .L.str.1, 2
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "%d,"
.size .L.str.3, 4
.type _Z19shmem_reduce_kernelPiPKii,@object # @_Z19shmem_reduce_kernelPiPKii
.section .rodata,"a",@progbits
.globl _Z19shmem_reduce_kernelPiPKii
.p2align 3, 0x0
_Z19shmem_reduce_kernelPiPKii:
.quad _Z34__device_stub__shmem_reduce_kernelPiPKii
.size _Z19shmem_reduce_kernelPiPKii, 8
.type _Z17last_digit_kernelPiPKii,@object # @_Z17last_digit_kernelPiPKii
.globl _Z17last_digit_kernelPiPKii
.p2align 3, 0x0
_Z17last_digit_kernelPiPKii:
.quad _Z32__device_stub__last_digit_kernelPiPKii
.size _Z17last_digit_kernelPiPKii, 8
.type .L.str.5,@object # @.str.5
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.5:
.asciz "./q1a.txt"
.size .L.str.5, 10
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "w"
.size .L.str.6, 2
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "%d"
.size .L.str.8, 3
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "./q1b.txt"
.size .L.str.9, 10
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz ", "
.size .L.str.10, 3
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z19shmem_reduce_kernelPiPKii"
.size .L__unnamed_1, 30
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z17last_digit_kernelPiPKii"
.size .L__unnamed_2, 28
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "!! Error in opening data file "
.size .Lstr, 31
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "!! Error in opening output file "
.size .Lstr.2, 33
.type .Lstr.3,@object # @str.3
.Lstr.3:
.asciz "!! Error: no devices supporting CUDA."
.size .Lstr.3, 38
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z34__device_stub__shmem_reduce_kernelPiPKii
.addrsig_sym _Z32__device_stub__last_digit_kernelPiPKii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z19shmem_reduce_kernelPiPKii
.addrsig_sym _Z17last_digit_kernelPiPKii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z17last_digit_kernelPiPKii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_TID.X ; /* 0x0000000000047919 */
/* 0x000e280000002100 */
/*0020*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e240000002500 */
/*0030*/ IMAD R4, R3, c[0x0][0x0], R4 ; /* 0x0000000003047a24 */
/* 0x001fca00078e0204 */
/*0040*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x170], PT ; /* 0x00005c0004007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R9, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff097435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R2, R4, R9, c[0x0][0x168] ; /* 0x00005a0004027625 */
/* 0x000fcc00078e0209 */
/*0090*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea4000c1e1900 */
/*00a0*/ IMAD.HI R0, R2, 0x66666667, RZ ; /* 0x6666666702007827 */
/* 0x004fca00078e02ff */
/*00b0*/ SHF.R.U32.HI R5, RZ, 0x1f, R0 ; /* 0x0000001fff057819 */
/* 0x000fc80000011600 */
/*00c0*/ LEA.HI.SX32 R7, R0, R5, 0x1e ; /* 0x0000000500077211 */
/* 0x000fe200078ff2ff */
/*00d0*/ IMAD.WIDE R4, R4, R9, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fc800078e0209 */
/*00e0*/ IMAD R7, R7, -0xa, R2 ; /* 0xfffffff607077824 */
/* 0x000fca00078e0202 */
/*00f0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*0100*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0110*/ BRA 0x110; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z19shmem_reduce_kernelPiPKii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R0, R6, c[0x0][0x0], R7 ; /* 0x0000000006007a24 */
/* 0x001fc800078e0207 */
/*0060*/ IMAD.WIDE R2, R0, R3, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fcc00078e0203 */
/*0070*/ LDG.E R2, [R2.64] ; /* 0x0000000602027981 */
/* 0x000ea2000c1e1900 */
/*0080*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */
/* 0x000fe40000000800 */
/*0090*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */
/* 0x000fcc0008011604 */
/*00a0*/ ISETP.NE.AND P0, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */
/* 0x000fe2000bf05270 */
/*00b0*/ STS [R7.X4], R2 ; /* 0x0000000207007388 */
/* 0x0041e80000004800 */
/*00c0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000ff00000010000 */
/*00d0*/ @!P0 BRA 0x1f0 ; /* 0x0000011000008947 */
/* 0x000fea0003800000 */
/*00e0*/ SHF.L.U32 R2, R7, 0x2, RZ ; /* 0x0000000207027819 */
/* 0x001fe200000006ff */
/*00f0*/ IMAD.U32 R3, RZ, RZ, UR4 ; /* 0x00000004ff037e24 */
/* 0x000fca000f8e00ff */
/*0100*/ IADD3 R4, R0, R3, RZ ; /* 0x0000000300047210 */
/* 0x000fe20007ffe0ff */
/*0110*/ BSSY B0, 0x1b0 ; /* 0x0000009000007945 */
/* 0x000fe60003800000 */
/*0120*/ ISETP.GE.U32.AND P0, PT, R4, c[0x0][0x170], PT ; /* 0x00005c0004007a0c */
/* 0x000fc80003f06070 */
/*0130*/ ISETP.GE.U32.OR P0, PT, R7, R3, P0 ; /* 0x000000030700720c */
/* 0x000fda0000706470 */
/*0140*/ @P0 BRA 0x1a0 ; /* 0x0000005000000947 */
/* 0x001fea0003800000 */
/*0150*/ IMAD R5, R3, 0x4, R2 ; /* 0x0000000403057824 */
/* 0x000fe200078e0202 */
/*0160*/ LDS R4, [R7.X4] ; /* 0x0000000007047984 */
/* 0x000fea0000004800 */
/*0170*/ LDS R5, [R5] ; /* 0x0000000005057984 */
/* 0x000e240000000800 */
/*0180*/ ISETP.GT.AND P0, PT, R4, R5, PT ; /* 0x000000050400720c */
/* 0x001fda0003f04270 */
/*0190*/ @P0 STS [R7.X4], R5 ; /* 0x0000000507000388 */
/* 0x0001e40000004800 */
/*01a0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*01b0*/ SHF.R.U32.HI R3, RZ, 0x1, R3 ; /* 0x00000001ff037819 */
/* 0x000fe20000011603 */
/*01c0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe60000010000 */
/*01d0*/ ISETP.NE.AND P0, PT, R3, RZ, PT ; /* 0x000000ff0300720c */
/* 0x000fda0003f05270 */
/*01e0*/ @P0 BRA 0x100 ; /* 0xffffff1000000947 */
/* 0x000fea000383ffff */
/*01f0*/ ISETP.NE.AND P0, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x001fda0003f05270 */
/*0200*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0210*/ LDS R5, [RZ] ; /* 0x00000000ff057984 */
/* 0x000e220000000800 */
/*0220*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fd400000001ff */
/*0230*/ IMAD.WIDE.U32 R2, R6, R3, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x000fca00078e0003 */
/*0240*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101906 */
/*0250*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0260*/ BRA 0x260; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z19shmem_reduce_kernelPiPKii
.globl _Z19shmem_reduce_kernelPiPKii
.p2align 8
.type _Z19shmem_reduce_kernelPiPKii,@function
_Z19shmem_reduce_kernelPiPKii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b64 s[6:7], s[0:1], 0x8
s_mov_b32 s4, s15
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s4, s3, v[0:1]
s_cmp_lt_u32 s3, 2
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
v_add_co_u32 v2, vcc_lo, s6, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s7, v3, vcc_lo
global_load_b32 v3, v[2:3], off
v_lshl_add_u32 v2, v0, 2, 0
s_waitcnt vmcnt(0)
ds_store_b32 v2, v3
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_cbranch_scc1 .LBB0_6
s_load_b32 s5, s[0:1], 0x10
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_3
.p2align 6
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s2
s_cmp_lt_u32 s6, 4
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_cbranch_scc1 .LBB0_6
.LBB0_3:
s_mov_b32 s6, s3
s_lshr_b32 s3, s3, 1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v3, s3, v1
v_cmp_gt_u32_e32 vcc_lo, s3, v0
s_waitcnt lgkmcnt(0)
v_cmp_gt_u32_e64 s2, s5, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s7, vcc_lo, s2
s_and_saveexec_b32 s2, s7
s_cbranch_execz .LBB0_2
v_add_nc_u32_e32 v3, s3, v0
s_delay_alu instid0(VALU_DEP_1)
v_lshl_add_u32 v3, v3, 2, 0
ds_load_b32 v4, v2
ds_load_b32 v3, v3
s_waitcnt lgkmcnt(0)
v_cmp_gt_i32_e32 vcc_lo, v4, v3
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_2
ds_store_b32 v2, v3
s_branch .LBB0_2
.LBB0_6:
s_set_inst_prefetch_distance 0x2
s_mov_b32 s2, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB0_8
v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0
s_load_b64 s[0:1], s[0:1], 0x0
s_mov_b32 s5, 0
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[2:3], s[4:5], 2
ds_load_b32 v0, v0
s_waitcnt lgkmcnt(0)
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
global_store_b32 v1, v0, s[0:1]
.LBB0_8:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z19shmem_reduce_kernelPiPKii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z19shmem_reduce_kernelPiPKii, .Lfunc_end0-_Z19shmem_reduce_kernelPiPKii
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z17last_digit_kernelPiPKii
.globl _Z17last_digit_kernelPiPKii
.p2align 8
.type _Z17last_digit_kernelPiPKii,@function
_Z17last_digit_kernelPiPKii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB1_2
s_load_b128 s[0:3], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_load_b32 v2, v[2:3], off
s_waitcnt vmcnt(0)
v_mul_hi_i32 v3, v2, 0x66666667
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshrrev_b32_e32 v4, 31, v3
v_ashrrev_i32_e32 v3, 2, v3
v_add_nc_u32_e32 v3, v3, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v3, v3, 10
v_sub_nc_u32_e32 v2, v2, v3
global_store_b32 v[0:1], v2, off
.LBB1_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z17last_digit_kernelPiPKii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z17last_digit_kernelPiPKii, .Lfunc_end1-_Z17last_digit_kernelPiPKii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
- .offset: 144
.size: 4
.value_kind: hidden_dynamic_lds_size
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z19shmem_reduce_kernelPiPKii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z19shmem_reduce_kernelPiPKii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z17last_digit_kernelPiPKii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z17last_digit_kernelPiPKii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0009e6ab_00000000-6_q1.cudafe1.cpp"
.text
#APP
#NO_APP
# atexit handler installed by the CUDA registration constructor for this
# translation unit: unregisters the embedded fatbinary at program exit.
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2064:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
# pass the module handle that was saved when the fatbinary was registered
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2064:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "r"
.LC1:
.string "./inp.txt"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "!! Error in opening data file \n"
.section .rodata.str1.1
.LC3:
.string "%d,"
.text
.globl _Z9read_dataPi
.type _Z9read_dataPi, @function
_Z9read_dataPi:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $8, %rsp
.cfi_def_cfa_offset 64
movq %rdi, %r15
leaq .LC0(%rip), %rsi
leaq .LC1(%rip), %rdi
call fopen@PLT
testq %rax, %rax
je .L9
movq %rax, %rbp
movl $4000000, %edi
call malloc@PLT
movq %rax, %r14
movq %rax, %rbx
movl $0, %r12d
leaq .LC3(%rip), %r13
.L5:
movq %rbp, %rdi
call feof@PLT
testl %eax, %eax
jne .L6
movq %rbx, %rdx
movq %r13, %rsi
movq %rbp, %rdi
movl $0, %eax
call __isoc23_fscanf@PLT
addq $4, %rbx
cmpl $1, %eax
jne .L6
addl $1, %r12d
jmp .L5
.L9:
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L6:
movq %rbp, %rdi
call fclose@PLT
movl %r12d, (%r15)
movq %r14, %rax
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size _Z9read_dataPi, .-_Z9read_dataPi
# int round_up_pow2(int n):
# returns the smallest power of two that is >= n; any n <= 1 yields 1.
.globl _Z13round_up_pow2i
.type _Z13round_up_pow2i, @function
_Z13round_up_pow2i:
.LFB2058:
.cfi_startproc
endbr64
# start the candidate result at 1; inputs <= 1 return it immediately
movl $1, %eax
cmpl $1, %edi
jle .L10
.L12:
# double the candidate until it reaches or exceeds n (eax >= edi)
addl %eax, %eax
cmpl %eax, %edi
jg .L12
.L10:
ret
.cfi_endproc
.LFE2058:
.size _Z13round_up_pow2i, .-_Z13round_up_pow2i
# int calc_num_thread(int n):
# computes round_up_pow2((int)sqrt((double)n)) — i.e. the smallest power of
# two >= the truncated square root of n.
.globl _Z15calc_num_threadi
.type _Z15calc_num_threadi, @function
_Z15calc_num_threadi:
.LFB2059:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
pxor %xmm0, %xmm0
cvtsi2sdl %edi, %xmm0
pxor %xmm1, %xmm1
# if 0.0 > x (negative input), branch to the libm sqrt call so its error
# semantics are preserved; otherwise use the inline sqrtsd instruction
ucomisd %xmm0, %xmm1
ja .L20
sqrtsd %xmm0, %xmm0
.L18:
# truncate the root to int and tail into round_up_pow2
cvttsd2sil %xmm0, %edi
call _Z13round_up_pow2i
addq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L20:
.cfi_restore_state
call sqrt@PLT
jmp .L18
.cfi_endproc
.LFE2059:
.size _Z15calc_num_threadi, .-_Z15calc_num_threadi
.globl _Z43__device_stub__Z19shmem_reduce_kernelPiPKiiPiPKii
.type _Z43__device_stub__Z19shmem_reduce_kernelPiPKiiPiPKii, @function
_Z43__device_stub__Z19shmem_reduce_kernelPiPKiiPiPKii:
.LFB2086:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L26
.L22:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L27
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L26:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z19shmem_reduce_kernelPiPKii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L22
.L27:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2086:
.size _Z43__device_stub__Z19shmem_reduce_kernelPiPKiiPiPKii, .-_Z43__device_stub__Z19shmem_reduce_kernelPiPKiiPiPKii
.globl _Z19shmem_reduce_kernelPiPKii
.type _Z19shmem_reduce_kernelPiPKii, @function
_Z19shmem_reduce_kernelPiPKii:
.LFB2087:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z43__device_stub__Z19shmem_reduce_kernelPiPKiiPiPKii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2087:
.size _Z19shmem_reduce_kernelPiPKii, .-_Z19shmem_reduce_kernelPiPKii
.globl _Z6reducePiS_S_i
.type _Z6reducePiS_S_i, @function
_Z6reducePiS_S_i:
.LFB2060:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $32, %rsp
.cfi_def_cfa_offset 80
movq %rdi, %r13
movq %rsi, %r12
movq %rdx, %r14
movl %ecx, %ebp
movl %ecx, %edi
call _Z15calc_num_threadi
movl %eax, %ecx
leal -1(%rax,%rbp), %eax
cltd
idivl %ecx
movl %eax, %ebx
movl %ecx, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl %eax, 8(%rsp)
movl $1, 12(%rsp)
movl $1, 16(%rsp)
movslq %ecx, %rcx
movl $0, %r9d
leaq 0(,%rcx,4), %r8
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L34
.L31:
movl %ebx, %edi
call _Z13round_up_pow2i
movl %eax, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 8(%rsp)
movl $1, 12(%rsp)
movl $1, 16(%rsp)
movslq %ebx, %rax
movl $0, %r9d
leaq 0(,%rax,4), %r8
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L35
.L30:
addq $32, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L34:
.cfi_restore_state
movl %ebp, %edx
movq %r14, %rsi
movq %r12, %rdi
call _Z43__device_stub__Z19shmem_reduce_kernelPiPKiiPiPKii
jmp .L31
.L35:
movl %ebx, %edx
movq %r12, %rsi
movq %r13, %rdi
call _Z43__device_stub__Z19shmem_reduce_kernelPiPKiiPiPKii
jmp .L30
.cfi_endproc
.LFE2060:
.size _Z6reducePiS_S_i, .-_Z6reducePiS_S_i
.globl _Z41__device_stub__Z17last_digit_kernelPiPKiiPiPKii
.type _Z41__device_stub__Z17last_digit_kernelPiPKiiPiPKii, @function
_Z41__device_stub__Z17last_digit_kernelPiPKiiPiPKii:
.LFB2088:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L40
.L36:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L41
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L40:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z17last_digit_kernelPiPKii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L36
.L41:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2088:
.size _Z41__device_stub__Z17last_digit_kernelPiPKiiPiPKii, .-_Z41__device_stub__Z17last_digit_kernelPiPKiiPiPKii
.globl _Z17last_digit_kernelPiPKii
.type _Z17last_digit_kernelPiPKii, @function
_Z17last_digit_kernelPiPKii:
.LFB2089:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z41__device_stub__Z17last_digit_kernelPiPKiiPiPKii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2089:
.size _Z17last_digit_kernelPiPKii, .-_Z17last_digit_kernelPiPKii
.section .rodata.str1.8
.align 8
.LC5:
.string "!! Error: no devices supporting CUDA.\n"
.section .rodata.str1.1
.LC6:
.string "w"
.LC7:
.string "./q1a.txt"
.section .rodata.str1.8
.align 8
.LC8:
.string "!! Error in opening output file \n"
.section .rodata.str1.1
.LC9:
.string "%d"
.LC10:
.string "./q1b.txt"
.LC11:
.string ", "
.text
.globl main
.type main, @function
main:
.LFB2061:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $120, %rsp
.cfi_def_cfa_offset 176
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rdi
call cudaGetDeviceCount@PLT
cmpl $0, 24(%rsp)
je .L57
movl $0, %edi
call cudaSetDevice@PLT
movl $0, 28(%rsp)
leaq 28(%rsp), %rdi
call _Z9read_dataPi
movq %rax, %rbp
movq %rax, 8(%rsp)
movl 28(%rsp), %r15d
movslq %r15d, %r14
leal 0(,%r15,4), %ebx
movslq %ebx, %rbx
leaq 40(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 48(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 56(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %rbp, %rsi
movq 40(%rsp), %rdi
call cudaMemcpy@PLT
leaq 64(%rsp), %rdi
call cudaEventCreate@PLT
leaq 72(%rsp), %rdi
call cudaEventCreate@PLT
movl $0, %esi
movq 64(%rsp), %rdi
call cudaEventRecord@PLT
movl %r15d, %ecx
movq 40(%rsp), %rdx
movq 48(%rsp), %rsi
movq 56(%rsp), %rdi
call _Z6reducePiS_S_i
movl $0, %esi
movq 72(%rsp), %rdi
call cudaEventRecord@PLT
movq 72(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 32(%rsp), %rdi
movq 72(%rsp), %rdx
movq 64(%rsp), %rsi
call cudaEventElapsedTime@PLT
leaq 36(%rsp), %rdi
movl $2, %ecx
movl $4, %edx
movq 56(%rsp), %rsi
call cudaMemcpy@PLT
leaq .LC6(%rip), %rsi
leaq .LC7(%rip), %rdi
call fopen@PLT
movq %rax, %rbp
testq %rax, %rax
je .L58
movl 36(%rsp), %ecx
leaq .LC9(%rip), %rdx
movl $2, %esi
movq %rax, %rdi
movl $0, %eax
call __fprintf_chk@PLT
movq %rbp, %rdi
call fclose@PLT
movq 56(%rsp), %rdi
call cudaFree@PLT
movq 48(%rsp), %rax
movq %rax, 56(%rsp)
movl %r15d, %edi
call _Z15calc_num_threadi
movl %eax, %ebp
leal -1(%r15,%rax), %eax
cltd
idivl %ebp
movl %eax, %r12d
movl $0, %esi
movq 64(%rsp), %rdi
call cudaEventRecord@PLT
movl %ebp, 92(%rsp)
movl $1, 96(%rsp)
movl %r12d, 80(%rsp)
movl $1, 84(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 92(%rsp), %rdx
movl $1, %ecx
movq 80(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L59
.L47:
movl $0, %esi
movq 72(%rsp), %rdi
call cudaEventRecord@PLT
movq 72(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 32(%rsp), %rdi
movq 72(%rsp), %rdx
movq 64(%rsp), %rsi
call cudaEventElapsedTime@PLT
movq %rbx, %rdi
call malloc@PLT
movq %rax, %r12
movl $2, %ecx
movq %rbx, %rdx
movq 56(%rsp), %rsi
movq %rax, %rdi
call cudaMemcpy@PLT
leaq .LC6(%rip), %rsi
leaq .LC10(%rip), %rdi
call fopen@PLT
movq %rax, %rbp
testq %rax, %rax
je .L48
movl $0, %ebx
leal -1(%r15), %r13d
testl %r15d, %r15d
jg .L49
.L50:
movq %rbp, %rdi
call fclose@PLT
movq 8(%rsp), %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 48(%rsp), %rdi
call cudaFree@PLT
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L60
movl $0, %eax
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L57:
.cfi_restore_state
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L58:
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L59:
movl %r15d, %edx
movq 40(%rsp), %rsi
movq 56(%rsp), %rdi
call _Z41__device_stub__Z17last_digit_kernelPiPKiiPiPKii
jmp .L47
.L48:
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L61:
leaq .LC11(%rip), %rdx
movl $2, %esi
movq %rbp, %rdi
movl $0, %eax
call __fprintf_chk@PLT
.L51:
addq $1, %rbx
cmpq %rbx, %r14
je .L50
.L49:
movl (%r12,%rbx,4), %ecx
leaq .LC9(%rip), %rdx
movl $2, %esi
movq %rbp, %rdi
movl $0, %eax
call __fprintf_chk@PLT
cmpl %ebx, %r13d
jle .L51
jmp .L61
.L60:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2061:
.size main, .-main
.section .rodata.str1.1
.LC12:
.string "_Z17last_digit_kernelPiPKii"
.LC13:
.string "_Z19shmem_reduce_kernelPiPKii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2091:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC12(%rip), %rdx
movq %rdx, %rcx
leaq _Z17last_digit_kernelPiPKii(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC13(%rip), %rdx
movq %rdx, %rcx
leaq _Z19shmem_reduce_kernelPiPKii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2091:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "q1.hip"
.globl _Z9read_dataPi # -- Begin function _Z9read_dataPi
.p2align 4, 0x90
.type _Z9read_dataPi,@function
_Z9read_dataPi: # @_Z9read_dataPi
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
movl $.L.str, %edi
movl $.L.str.1, %esi
callq fopen
testq %rax, %rax
je .LBB0_6
# %bb.1:
movq %rax, %r14
movl $4000000, %edi # imm = 0x3D0900
callq malloc
movq %rax, %r15
movq %r14, %rdi
callq feof
xorl %r13d, %r13d
testl %eax, %eax
jne .LBB0_5
# %bb.2: # %.lr.ph.preheader
movq %r15, %r12
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB0_3: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl $.L.str.3, %esi
movq %r14, %rdi
movq %r12, %rdx
xorl %eax, %eax
callq __isoc23_fscanf
cmpl $1, %eax
jne .LBB0_5
# %bb.4: # in Loop: Header=BB0_3 Depth=1
incq %r13
movq %r14, %rdi
callq feof
addq $4, %r12
testl %eax, %eax
je .LBB0_3
.LBB0_5: # %._crit_edge
movq %r14, %rdi
callq fclose
movl %r13d, (%rbx)
movq %r15, %rax
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB0_6:
.cfi_def_cfa_offset 48
movl $.Lstr, %edi
callq puts@PLT
movl $1, %edi
callq exit
.Lfunc_end0:
.size _Z9read_dataPi, .Lfunc_end0-_Z9read_dataPi
.cfi_endproc
# -- End function
.globl _Z13round_up_pow2i # -- Begin function _Z13round_up_pow2i
.p2align 4, 0x90
.type _Z13round_up_pow2i,@function
_Z13round_up_pow2i: # @_Z13round_up_pow2i
.cfi_startproc
# %bb.0:
testl %edi, %edi
je .LBB1_1
# %bb.2: # %.preheader.preheader
movl $1, %ecx
.p2align 4, 0x90
.LBB1_3: # %.preheader
# =>This Inner Loop Header: Depth=1
movl %ecx, %eax
leal (%rax,%rax), %ecx
cmpl %edi, %eax
jl .LBB1_3
# %bb.4: # %.loopexit
# kill: def $eax killed $eax killed $rax
retq
.LBB1_1:
movl $1, %eax
# kill: def $eax killed $eax killed $rax
retq
.Lfunc_end1:
.size _Z13round_up_pow2i, .Lfunc_end1-_Z13round_up_pow2i
.cfi_endproc
# -- End function
.globl _Z15calc_num_threadi # -- Begin function _Z15calc_num_threadi
.p2align 4, 0x90
.type _Z15calc_num_threadi,@function
_Z15calc_num_threadi: # @_Z15calc_num_threadi
.cfi_startproc
# %bb.0:
cvtsi2sd %edi, %xmm0
xorpd %xmm1, %xmm1
ucomisd %xmm1, %xmm0
jb .LBB2_2
# %bb.1:
sqrtsd %xmm0, %xmm0
jmp .LBB2_3
.LBB2_2: # %call.sqrt
pushq %rax
.cfi_def_cfa_offset 16
callq sqrt
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_3: # %.split
cvttsd2si %xmm0, %ecx
testl %ecx, %ecx
je .LBB2_4
# %bb.5: # %.preheader.i.preheader
movl $1, %edx
.p2align 4, 0x90
.LBB2_6: # %.preheader.i
# =>This Inner Loop Header: Depth=1
movl %edx, %eax
leal (%rax,%rax), %edx
cmpl %ecx, %eax
jl .LBB2_6
# %bb.7: # %_Z13round_up_pow2i.exit
# kill: def $eax killed $eax killed $rax
retq
.LBB2_4:
movl $1, %eax
# kill: def $eax killed $eax killed $rax
retq
.Lfunc_end2:
.size _Z15calc_num_threadi, .Lfunc_end2-_Z15calc_num_threadi
.cfi_endproc
# -- End function
.globl _Z34__device_stub__shmem_reduce_kernelPiPKii # -- Begin function _Z34__device_stub__shmem_reduce_kernelPiPKii
.p2align 4, 0x90
.type _Z34__device_stub__shmem_reduce_kernelPiPKii,@function
_Z34__device_stub__shmem_reduce_kernelPiPKii: # @_Z34__device_stub__shmem_reduce_kernelPiPKii
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z19shmem_reduce_kernelPiPKii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end3:
.size _Z34__device_stub__shmem_reduce_kernelPiPKii, .Lfunc_end3-_Z34__device_stub__shmem_reduce_kernelPiPKii
.cfi_endproc
# -- End function
.globl _Z6reducePiS_S_i # -- Begin function _Z6reducePiS_S_i
.p2align 4, 0x90
.type _Z6reducePiS_S_i,@function
_Z6reducePiS_S_i: # @_Z6reducePiS_S_i
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $104, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %ecx, %r12d
movq %rdx, %r13
movq %rsi, %rbx
cvtsi2sd %ecx, %xmm0
movq %rdi, %r14
xorpd %xmm1, %xmm1
ucomisd %xmm1, %xmm0
jb .LBB4_2
# %bb.1:
sqrtsd %xmm0, %xmm0
jmp .LBB4_3
.LBB4_2: # %call.sqrt
callq sqrt
.LBB4_3: # %.split
cvttsd2si %xmm0, %eax
testl %eax, %eax
je .LBB4_4
# %bb.5: # %.preheader.i.i.preheader
movl $1, %edx
.p2align 4, 0x90
.LBB4_6: # %.preheader.i.i
# =>This Inner Loop Header: Depth=1
movl %edx, %ecx
leal (%rcx,%rcx), %edx
cmpl %eax, %ecx
jl .LBB4_6
jmp .LBB4_7
.LBB4_4:
movl $1, %ecx
.LBB4_7: # %_Z15calc_num_threadi.exit
movabsq $4294967296, %rbp # imm = 0x100000000
leal (%r12,%rcx), %eax
decl %eax
cltd
idivl %ecx
movl %eax, %r15d
movslq %ecx, %rax
leaq (,%rax,4), %r8
leaq (%r15,%rbp), %rdi
movl %eax, %edx
orq %rbp, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_9
# %bb.8:
movq %rbx, 72(%rsp)
movq %r13, 64(%rsp)
movl %r12d, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z19shmem_reduce_kernelPiPKii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_9:
leaq 1(%rbp), %rdi
movq %rdi, %rdx
testl %r15d, %r15d
je .LBB4_13
# %bb.10: # %.preheader.i.preheader
movl $1, %ecx
.p2align 4, 0x90
.LBB4_11: # %.preheader.i
# =>This Inner Loop Header: Depth=1
movl %ecx, %eax
leal (%rax,%rax), %ecx
cmpl %r15d, %eax
jl .LBB4_11
# %bb.12: # %_Z13round_up_pow2i.exit.loopexit
movl %eax, %edx
orq %rbp, %rdx
.LBB4_13: # %_Z13round_up_pow2i.exit
movslq %r15d, %r8
shlq $2, %r8
movl $1, %esi
movl $1, %ecx
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_15
# %bb.14:
movq %r14, 72(%rsp)
movq %rbx, 64(%rsp)
movl %r15d, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z19shmem_reduce_kernelPiPKii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_15:
addq $104, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end4:
.size _Z6reducePiS_S_i, .Lfunc_end4-_Z6reducePiS_S_i
.cfi_endproc
# -- End function
.globl _Z32__device_stub__last_digit_kernelPiPKii # -- Begin function _Z32__device_stub__last_digit_kernelPiPKii
.p2align 4, 0x90
.type _Z32__device_stub__last_digit_kernelPiPKii,@function
_Z32__device_stub__last_digit_kernelPiPKii: # @_Z32__device_stub__last_digit_kernelPiPKii
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z17last_digit_kernelPiPKii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end5:
.size _Z32__device_stub__last_digit_kernelPiPKii, .Lfunc_end5-_Z32__device_stub__last_digit_kernelPiPKii
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $152, %rsp
.cfi_def_cfa_offset 208
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
leaq 56(%rsp), %rdi
callq hipGetDeviceCount
cmpl $0, 56(%rsp)
je .LBB6_1
# %bb.3:
xorl %edi, %edi
callq hipSetDevice
movl $0, 20(%rsp)
leaq 20(%rsp), %rdi
callq _Z9read_dataPi
movq %rax, %rbx
movl 20(%rsp), %r14d
leal (,%r14,4), %eax
movslq %eax, %r15
leaq 32(%rsp), %rdi
movq %r15, %rsi
callq hipMalloc
leaq 40(%rsp), %rdi
movq %r15, %rsi
callq hipMalloc
leaq 8(%rsp), %rdi
movl $4, %esi
callq hipMalloc
movq 32(%rsp), %rdi
movq %rbx, %rsi
movq %r15, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 24(%rsp), %rdi
callq hipEventCreate
movq %rsp, %rdi
callq hipEventCreate
movq 24(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 8(%rsp), %rdi
movq 40(%rsp), %rsi
movq 32(%rsp), %rdx
movl %r14d, %ecx
callq _Z6reducePiS_S_i
movq (%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq (%rsp), %rdi
callq hipEventSynchronize
movq 24(%rsp), %rsi
movq (%rsp), %rdx
leaq 52(%rsp), %rdi
callq hipEventElapsedTime
movq 8(%rsp), %rsi
leaq 48(%rsp), %rdi
movl $4, %edx
movl $2, %ecx
callq hipMemcpy
movl $.L.str.5, %edi
movl $.L.str.6, %esi
callq fopen
testq %rax, %rax
je .LBB6_4
# %bb.5:
movq %rax, %r12
movl 48(%rsp), %edx
movl $.L.str.8, %esi
movq %rax, %rdi
xorl %eax, %eax
callq fprintf
movq %r12, %rdi
callq fclose
movq 8(%rsp), %rdi
callq hipFree
movq 40(%rsp), %rax
cvtsi2sd %r14d, %xmm0
movq %rax, 8(%rsp)
xorpd %xmm1, %xmm1
ucomisd %xmm1, %xmm0
jb .LBB6_7
# %bb.6:
sqrtsd %xmm0, %xmm0
jmp .LBB6_8
.LBB6_7: # %call.sqrt
callq sqrt
.LBB6_8: # %.split
cvttsd2si %xmm0, %eax
testl %eax, %eax
je .LBB6_9
# %bb.10: # %.preheader.i.i.preheader
movl $1, %ecx
.p2align 4, 0x90
.LBB6_11: # %.preheader.i.i
# =>This Inner Loop Header: Depth=1
movl %ecx, %r13d
leal (,%r13,2), %ecx
cmpl %eax, %r13d
jl .LBB6_11
jmp .LBB6_12
.LBB6_9:
movl $1, %r13d
.LBB6_12: # %_Z15calc_num_threadi.exit
leal (%r14,%r13), %eax
decl %eax
cltd
idivl %r13d
movl %eax, %r12d
movq 24(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movabsq $4294967296, %rax # imm = 0x100000000
orq %rax, %r12
movl %r13d, %edx
orq %rax, %rdx
movq %r12, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB6_14
# %bb.13:
movq 8(%rsp), %rax
movq 32(%rsp), %rcx
movq %rax, 120(%rsp)
movq %rcx, 112(%rsp)
movl %r14d, 60(%rsp)
leaq 120(%rsp), %rax
movq %rax, 128(%rsp)
leaq 112(%rsp), %rax
movq %rax, 136(%rsp)
leaq 60(%rsp), %rax
movq %rax, 144(%rsp)
leaq 96(%rsp), %rdi
leaq 80(%rsp), %rsi
leaq 72(%rsp), %rdx
leaq 64(%rsp), %rcx
callq __hipPopCallConfiguration
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
movq 80(%rsp), %rcx
movl 88(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z17last_digit_kernelPiPKii, %edi
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
pushq 80(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB6_14:
movq (%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq (%rsp), %rdi
callq hipEventSynchronize
movq 24(%rsp), %rsi
movq (%rsp), %rdx
leaq 52(%rsp), %rdi
callq hipEventElapsedTime
movq %r15, %rdi
callq malloc
movq %rax, %r12
movq 8(%rsp), %rsi
movq %rax, %rdi
movq %r15, %rdx
movl $2, %ecx
callq hipMemcpy
movl $.L.str.9, %edi
movl $.L.str.6, %esi
callq fopen
testq %rax, %rax
je .LBB6_4
# %bb.15: # %.preheader
movq %rax, %r15
testl %r14d, %r14d
jle .LBB6_20
# %bb.16: # %.lr.ph
movslq %r14d, %r13
decq %r13
xorl %ebp, %ebp
jmp .LBB6_17
.p2align 4, 0x90
.LBB6_19: # in Loop: Header=BB6_17 Depth=1
incq %rbp
cmpq %rbp, %r14
je .LBB6_20
.LBB6_17: # =>This Inner Loop Header: Depth=1
movl (%r12,%rbp,4), %edx
movl $.L.str.8, %esi
movq %r15, %rdi
xorl %eax, %eax
callq fprintf
cmpq %r13, %rbp
jge .LBB6_19
# %bb.18: # in Loop: Header=BB6_17 Depth=1
movl $.L.str.10, %edi
movl $2, %esi
movl $1, %edx
movq %r15, %rcx
callq fwrite@PLT
jmp .LBB6_19
.LBB6_20: # %._crit_edge
movq %r15, %rdi
callq fclose
movq %rbx, %rdi
callq free
movq %r12, %rdi
callq free
movq 32(%rsp), %rdi
callq hipFree
movq 40(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $152, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB6_4:
.cfi_def_cfa_offset 208
movl $.Lstr.2, %edi
jmp .LBB6_2
.LBB6_1:
movl $.Lstr.3, %edi
.LBB6_2:
callq puts@PLT
movl $1, %edi
callq exit
.Lfunc_end6:
.size main, .Lfunc_end6-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB7_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB7_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z19shmem_reduce_kernelPiPKii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z17last_digit_kernelPiPKii, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end7:
.size __hip_module_ctor, .Lfunc_end7-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB8_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB8_2:
retq
.Lfunc_end8:
.size __hip_module_dtor, .Lfunc_end8-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "./inp.txt"
.size .L.str, 10
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "r"
.size .L.str.1, 2
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "%d,"
.size .L.str.3, 4
.type _Z19shmem_reduce_kernelPiPKii,@object # @_Z19shmem_reduce_kernelPiPKii
.section .rodata,"a",@progbits
.globl _Z19shmem_reduce_kernelPiPKii
.p2align 3, 0x0
_Z19shmem_reduce_kernelPiPKii:
.quad _Z34__device_stub__shmem_reduce_kernelPiPKii
.size _Z19shmem_reduce_kernelPiPKii, 8
.type _Z17last_digit_kernelPiPKii,@object # @_Z17last_digit_kernelPiPKii
.globl _Z17last_digit_kernelPiPKii
.p2align 3, 0x0
_Z17last_digit_kernelPiPKii:
.quad _Z32__device_stub__last_digit_kernelPiPKii
.size _Z17last_digit_kernelPiPKii, 8
.type .L.str.5,@object # @.str.5
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.5:
.asciz "./q1a.txt"
.size .L.str.5, 10
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "w"
.size .L.str.6, 2
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "%d"
.size .L.str.8, 3
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "./q1b.txt"
.size .L.str.9, 10
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz ", "
.size .L.str.10, 3
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z19shmem_reduce_kernelPiPKii"
.size .L__unnamed_1, 30
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z17last_digit_kernelPiPKii"
.size .L__unnamed_2, 28
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "!! Error in opening data file "
.size .Lstr, 31
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "!! Error in opening output file "
.size .Lstr.2, 33
.type .Lstr.3,@object # @str.3
.Lstr.3:
.asciz "!! Error: no devices supporting CUDA."
.size .Lstr.3, 38
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z34__device_stub__shmem_reduce_kernelPiPKii
.addrsig_sym _Z32__device_stub__last_digit_kernelPiPKii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z19shmem_reduce_kernelPiPKii
.addrsig_sym _Z17last_digit_kernelPiPKii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void compute_row_on_Gamma_matrix_kernel(int row_index, int vertex_index, int* indices, double* exp_V, double* N_ptr, int LD_N, double* G_ptr, int LD_G, double* row_ptr, int incr) {
// Fills one row of the Gamma matrix: one thread block per output element,
// block l writing row_ptr[l * incr]. Launch with gridDim.x == row length.
int l = blockIdx.x;

// Translate the logical row/column positions into configuration indices.
int i_index = indices[row_index];
int j_index = indices[l];

double value;
if (j_index < vertex_index) {
// Column maps into the N matrix:
//   (N[i,j] * exp_V[l] - delta_ij) / (exp_V[l] - 1)
// NOTE(review): assumes exp_V[l] != 1.0, otherwise this divides by zero
// — confirm with the callers that generate exp_V.
double delta = (i_index == j_index) ? 1. : 0.;
double ev = exp_V[l];
value = (N_ptr[i_index + LD_N * j_index] * ev - delta) / (ev - 1.);
}
else {
// Column maps into the G matrix, with the column shifted back by vertex_index.
value = G_ptr[i_index + LD_G * (j_index - vertex_index)];
}
row_ptr[l * incr] = value;
} | code for sm_80
Function : _Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R9, SR_CTAID.X ; /* 0x0000000000097919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R4, RZ, RZ, 0x4 ; /* 0x00000004ff047424 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0040*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x160] ; /* 0x00005800ff057624 */
/* 0x000fe400078e00ff */
/*0050*/ IMAD.WIDE R6, R9, R4, c[0x0][0x168] ; /* 0x00005a0009067625 */
/* 0x001fca00078e0204 */
/*0060*/ LDG.E R0, [R6.64] ; /* 0x0000000406007981 */
/* 0x000ea2000c1e1900 */
/*0070*/ IMAD.WIDE R4, R4, R5, c[0x0][0x168] ; /* 0x00005a0004047625 */
/* 0x000fca00078e0205 */
/*0080*/ LDG.E R15, [R4.64] ; /* 0x00000004040f7981 */
/* 0x000162000c1e1900 */
/*0090*/ IMAD.MOV.U32 R11, RZ, RZ, 0x8 ; /* 0x00000008ff0b7424 */
/* 0x000fe200078e00ff */
/*00a0*/ SHF.R.S32.HI R10, RZ, 0x1f, R9 ; /* 0x0000001fff0a7819 */
/* 0x000fe20000011409 */
/*00b0*/ IMAD R2, R9, c[0x0][0x1a0], RZ ; /* 0x0000680009027a24 */
/* 0x000fc800078e02ff */
/*00c0*/ IMAD.WIDE R2, R2, R11, c[0x0][0x198] ; /* 0x0000660002027625 */
/* 0x000fe200078e020b */
/*00d0*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x164], PT ; /* 0x0000590000007a0c */
/* 0x004fda0003f06270 */
/*00e0*/ @!P0 BRA 0x150 ; /* 0x0000006000008947 */
/* 0x000fea0003800000 */
/*00f0*/ IADD3 R0, R0, -c[0x0][0x164], RZ ; /* 0x8000590000007a10 */
/* 0x001fca0007ffe0ff */
/*0100*/ IMAD R0, R0, c[0x0][0x190], R15 ; /* 0x0000640000007a24 */
/* 0x020fc800078e020f */
/*0110*/ IMAD.WIDE R4, R0, R11, c[0x0][0x188] ; /* 0x0000620000047625 */
/* 0x000fcc00078e020b */
/*0120*/ LDG.E.64 R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1b00 */
/*0130*/ STG.E.64 [R2.64], R4 ; /* 0x0000000402007986 */
/* 0x004fe2000c101b04 */
/*0140*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0150*/ LEA R8, P0, R9, c[0x0][0x170], 0x3 ; /* 0x00005c0009087a11 */
/* 0x001fc800078018ff */
/*0160*/ LEA.HI.X R9, R9, c[0x0][0x174], R10, 0x3, P0 ; /* 0x00005d0009097a11 */
/* 0x000fcc00000f1c0a */
/*0170*/ LDG.E.64 R8, [R8.64] ; /* 0x0000000408087981 */
/* 0x000ea2000c1e1b00 */
/*0180*/ IMAD R4, R0, c[0x0][0x180], R15 ; /* 0x0000600000047a24 */
/* 0x020fc800078e020f */
/*0190*/ IMAD.WIDE R4, R4, R11, c[0x0][0x178] ; /* 0x00005e0004047625 */
/* 0x000fcc00078e020b */
/*01a0*/ LDG.E.64 R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ee2000c1e1b00 */
/*01b0*/ IMAD.MOV.U32 R10, RZ, RZ, 0x1 ; /* 0x00000001ff0a7424 */
/* 0x000fe200078e00ff */
/*01c0*/ ISETP.NE.AND P0, PT, R15, R0, PT ; /* 0x000000000f00720c */
/* 0x000fe20003f05270 */
/*01d0*/ IMAD.MOV.U32 R14, RZ, RZ, RZ ; /* 0x000000ffff0e7224 */
/* 0x000fc600078e00ff */
/*01e0*/ FSEL R15, RZ, 1.875, P0 ; /* 0x3ff00000ff0f7808 */
/* 0x000fe20000000000 */
/*01f0*/ DADD R6, R8, -1 ; /* 0xbff0000008067429 */
/* 0x004e0c0000000000 */
/*0200*/ MUFU.RCP64H R11, R7 ; /* 0x00000007000b7308 */
/* 0x001e220000001800 */
/*0210*/ DFMA R8, R8, R4, -R14 ; /* 0x000000040808722b */
/* 0x008e54000000080e */
/*0220*/ FSETP.GEU.AND P1, PT, |R9|, 6.5827683646048100446e-37, PT ; /* 0x036000000900780b */
/* 0x002fe20003f2e200 */
/*0230*/ DFMA R12, -R6, R10, 1 ; /* 0x3ff00000060c742b */
/* 0x001e0c000000010a */
/*0240*/ DFMA R12, R12, R12, R12 ; /* 0x0000000c0c0c722b */
/* 0x001e0c000000000c */
/*0250*/ DFMA R12, R10, R12, R10 ; /* 0x0000000c0a0c722b */
/* 0x001e0c000000000a */
/*0260*/ DFMA R10, -R6, R12, 1 ; /* 0x3ff00000060a742b */
/* 0x001e0c000000010c */
/*0270*/ DFMA R10, R12, R10, R12 ; /* 0x0000000a0c0a722b */
/* 0x001e0c000000000c */
/*0280*/ DMUL R4, R8, R10 ; /* 0x0000000a08047228 */
/* 0x001e0c0000000000 */
/*0290*/ DFMA R12, -R6, R4, R8 ; /* 0x00000004060c722b */
/* 0x001e0c0000000108 */
/*02a0*/ DFMA R4, R10, R12, R4 ; /* 0x0000000c0a04722b */
/* 0x001e140000000004 */
/*02b0*/ FFMA R0, RZ, R7, R5 ; /* 0x00000007ff007223 */
/* 0x001fca0000000005 */
/*02c0*/ FSETP.GT.AND P0, PT, |R0|, 1.469367938527859385e-39, PT ; /* 0x001000000000780b */
/* 0x000fda0003f04200 */
/*02d0*/ @P0 BRA P1, 0x320 ; /* 0x0000004000000947 */
/* 0x000fea0000800000 */
/*02e0*/ MOV R0, 0x300 ; /* 0x0000030000007802 */
/* 0x000fe40000000f00 */
/*02f0*/ CALL.REL.NOINC 0x340 ; /* 0x0000004000007944 */
/* 0x000fea0003c00000 */
/*0300*/ IMAD.MOV.U32 R4, RZ, RZ, R14 ; /* 0x000000ffff047224 */
/* 0x000fe400078e000e */
/*0310*/ IMAD.MOV.U32 R5, RZ, RZ, R15 ; /* 0x000000ffff057224 */
/* 0x000fca00078e000f */
/*0320*/ STG.E.64 [R2.64], R4 ; /* 0x0000000402007986 */
/* 0x000fe2000c101b04 */
/*0330*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0340*/ FSETP.GEU.AND P0, PT, |R7|.reuse, 1.469367938527859385e-39, PT ; /* 0x001000000700780b */
/* 0x040fe20003f0e200 */
/*0350*/ IMAD.MOV.U32 R10, RZ, RZ, 0x1 ; /* 0x00000001ff0a7424 */
/* 0x000fe200078e00ff */
/*0360*/ LOP3.LUT R4, R7.reuse, 0x800fffff, RZ, 0xc0, !PT ; /* 0x800fffff07047812 */
/* 0x040fe200078ec0ff */
/*0370*/ IMAD.MOV.U32 R15, RZ, RZ, 0x1ca00000 ; /* 0x1ca00000ff0f7424 */
/* 0x000fe200078e00ff */
/*0380*/ LOP3.LUT R16, R7, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff0000007107812 */
/* 0x000fe400078ec0ff */
/*0390*/ LOP3.LUT R5, R4, 0x3ff00000, RZ, 0xfc, !PT ; /* 0x3ff0000004057812 */
/* 0x000fe200078efcff */
/*03a0*/ IMAD.MOV.U32 R4, RZ, RZ, R6 ; /* 0x000000ffff047224 */
/* 0x000fe200078e0006 */
/*03b0*/ LOP3.LUT R14, R9, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff00000090e7812 */
/* 0x000fc800078ec0ff */
/*03c0*/ ISETP.GE.U32.AND P1, PT, R14, R16, PT ; /* 0x000000100e00720c */
/* 0x000fe20003f26070 */
/*03d0*/ @!P0 DMUL R4, R6, 8.98846567431157953865e+307 ; /* 0x7fe0000006048828 */
/* 0x000e060000000000 */
/*03e0*/ SEL R17, R15, 0x63400000, !P1 ; /* 0x634000000f117807 */
/* 0x000fe40004800000 */
/*03f0*/ FSETP.GEU.AND P1, PT, |R9|, 1.469367938527859385e-39, PT ; /* 0x001000000900780b */
/* 0x000fe20003f2e200 */
/*0400*/ MUFU.RCP64H R11, R5 ; /* 0x00000005000b7308 */
/* 0x001e280000001800 */
/*0410*/ @!P0 LOP3.LUT R16, R5, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff0000005108812 */
/* 0x000fe200078ec0ff */
/*0420*/ DFMA R12, R10, -R4, 1 ; /* 0x3ff000000a0c742b */
/* 0x001e0c0000000804 */
/*0430*/ DFMA R12, R12, R12, R12 ; /* 0x0000000c0c0c722b */
/* 0x001e0c000000000c */
/*0440*/ DFMA R12, R10, R12, R10 ; /* 0x0000000c0a0c722b */
/* 0x001e0c000000000a */
/*0450*/ DFMA R10, R12, -R4, 1 ; /* 0x3ff000000c0a742b */
/* 0x001e0c0000000804 */
/*0460*/ DFMA R12, R12, R10, R12 ; /* 0x0000000a0c0c722b */
/* 0x001064000000000c */
/*0470*/ LOP3.LUT R11, R17, 0x800fffff, R9, 0xf8, !PT ; /* 0x800fffff110b7812 */
/* 0x001fe200078ef809 */
/*0480*/ IMAD.MOV.U32 R17, RZ, RZ, R14 ; /* 0x000000ffff117224 */
/* 0x000fe400078e000e */
/*0490*/ IMAD.MOV.U32 R10, RZ, RZ, R8 ; /* 0x000000ffff0a7224 */
/* 0x000fe200078e0008 */
/*04a0*/ @P1 BRA 0x530 ; /* 0x0000008000001947 */
/* 0x000fea0003800000 */
/*04b0*/ LOP3.LUT R17, R7, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff0000007117812 */
/* 0x002fe200078ec0ff */
/*04c0*/ IMAD.MOV.U32 R18, RZ, RZ, RZ ; /* 0x000000ffff127224 */
/* 0x000fc600078e00ff */
/*04d0*/ ISETP.GE.U32.AND P0, PT, R14, R17, PT ; /* 0x000000110e00720c */
/* 0x000fc80003f06070 */
/*04e0*/ SEL R17, R15, 0x63400000, !P0 ; /* 0x634000000f117807 */
/* 0x000fc80004000000 */
/*04f0*/ LOP3.LUT R17, R17, 0x80000000, R9, 0xf8, !PT ; /* 0x8000000011117812 */
/* 0x000fc800078ef809 */
/*0500*/ LOP3.LUT R19, R17, 0x100000, RZ, 0xfc, !PT ; /* 0x0010000011137812 */
/* 0x000fcc00078efcff */
/*0510*/ DFMA R10, R10, 2, -R18 ; /* 0x400000000a0a782b */
/* 0x000e140000000812 */
/*0520*/ LOP3.LUT R17, R11, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff000000b117812 */
/* 0x001fc800078ec0ff */
/*0530*/ IADD3 R20, R17, -0x1, RZ ; /* 0xffffffff11147810 */
/* 0x002fe20007ffe0ff */
/*0540*/ DMUL R18, R12, R10 ; /* 0x0000000a0c127228 */
/* 0x000e220000000000 */
/*0550*/ IADD3 R22, R16, -0x1, RZ ; /* 0xffffffff10167810 */
/* 0x000fe40007ffe0ff */
/*0560*/ ISETP.GT.U32.AND P0, PT, R20, 0x7feffffe, PT ; /* 0x7feffffe1400780c */
/* 0x000fc60003f04070 */
/*0570*/ DFMA R20, R18, -R4, R10 ; /* 0x800000041214722b */
/* 0x001e22000000000a */
/*0580*/ ISETP.GT.U32.OR P0, PT, R22, 0x7feffffe, P0 ; /* 0x7feffffe1600780c */
/* 0x000fca0000704470 */
/*0590*/ DFMA R12, R12, R20, R18 ; /* 0x000000140c0c722b */
/* 0x0010500000000012 */
/*05a0*/ @P0 BRA 0x770 ; /* 0x000001c000000947 */
/* 0x000fea0003800000 */
/*05b0*/ LOP3.LUT R9, R7, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff0000007097812 */
/* 0x003fc800078ec0ff */
/*05c0*/ ISETP.GE.U32.AND P0, PT, R14.reuse, R9, PT ; /* 0x000000090e00720c */
/* 0x040fe20003f06070 */
/*05d0*/ IMAD.IADD R8, R14, 0x1, -R9 ; /* 0x000000010e087824 */
/* 0x000fc600078e0a09 */
/*05e0*/ SEL R15, R15, 0x63400000, !P0 ; /* 0x634000000f0f7807 */
/* 0x000fe40004000000 */
/*05f0*/ IMNMX R8, R8, -0x46a00000, !PT ; /* 0xb960000008087817 */
/* 0x000fc80007800200 */
/*0600*/ IMNMX R8, R8, 0x46a00000, PT ; /* 0x46a0000008087817 */
/* 0x000fca0003800200 */
/*0610*/ IMAD.IADD R16, R8, 0x1, -R15 ; /* 0x0000000108107824 */
/* 0x000fe400078e0a0f */
/*0620*/ IMAD.MOV.U32 R8, RZ, RZ, RZ ; /* 0x000000ffff087224 */
/* 0x000fc600078e00ff */
/*0630*/ IADD3 R9, R16, 0x7fe00000, RZ ; /* 0x7fe0000010097810 */
/* 0x000fcc0007ffe0ff */
/*0640*/ DMUL R14, R12, R8 ; /* 0x000000080c0e7228 */
/* 0x000e140000000000 */
/*0650*/ FSETP.GTU.AND P0, PT, |R15|, 1.469367938527859385e-39, PT ; /* 0x001000000f00780b */
/* 0x001fda0003f0c200 */
/*0660*/ @P0 BRA 0x8c0 ; /* 0x0000025000000947 */
/* 0x000fea0003800000 */
/*0670*/ DFMA R4, R12, -R4, R10 ; /* 0x800000040c04722b */
/* 0x000e22000000000a */
/*0680*/ IMAD.MOV.U32 R8, RZ, RZ, RZ ; /* 0x000000ffff087224 */
/* 0x000fd200078e00ff */
/*0690*/ FSETP.NEU.AND P0, PT, R5.reuse, RZ, PT ; /* 0x000000ff0500720b */
/* 0x041fe40003f0d000 */
/*06a0*/ LOP3.LUT R7, R5, 0x80000000, R7, 0x48, !PT ; /* 0x8000000005077812 */
/* 0x000fc800078e4807 */
/*06b0*/ LOP3.LUT R9, R7, R9, RZ, 0xfc, !PT ; /* 0x0000000907097212 */
/* 0x000fce00078efcff */
/*06c0*/ @!P0 BRA 0x8c0 ; /* 0x000001f000008947 */
/* 0x000fea0003800000 */
/*06d0*/ IMAD.MOV R5, RZ, RZ, -R16 ; /* 0x000000ffff057224 */
/* 0x000fe200078e0a10 */
/*06e0*/ DMUL.RP R8, R12, R8 ; /* 0x000000080c087228 */
/* 0x000e220000008000 */
/*06f0*/ IMAD.MOV.U32 R4, RZ, RZ, RZ ; /* 0x000000ffff047224 */
/* 0x000fcc00078e00ff */
/*0700*/ DFMA R4, R14, -R4, R12 ; /* 0x800000040e04722b */
/* 0x000e46000000000c */
/*0710*/ LOP3.LUT R7, R9, R7, RZ, 0x3c, !PT ; /* 0x0000000709077212 */
/* 0x001fc600078e3cff */
/*0720*/ IADD3 R4, -R16, -0x43300000, RZ ; /* 0xbcd0000010047810 */
/* 0x002fc80007ffe1ff */
/*0730*/ FSETP.NEU.AND P0, PT, |R5|, R4, PT ; /* 0x000000040500720b */
/* 0x000fc80003f0d200 */
/*0740*/ FSEL R14, R8, R14, !P0 ; /* 0x0000000e080e7208 */
/* 0x000fe40004000000 */
/*0750*/ FSEL R15, R7, R15, !P0 ; /* 0x0000000f070f7208 */
/* 0x000fe20004000000 */
/*0760*/ BRA 0x8c0 ; /* 0x0000015000007947 */
/* 0x000fea0003800000 */
/*0770*/ DSETP.NAN.AND P0, PT, R8, R8, PT ; /* 0x000000080800722a */
/* 0x003e1c0003f08000 */
/*0780*/ @P0 BRA 0x8a0 ; /* 0x0000011000000947 */
/* 0x001fea0003800000 */
/*0790*/ DSETP.NAN.AND P0, PT, R6, R6, PT ; /* 0x000000060600722a */
/* 0x000e1c0003f08000 */
/*07a0*/ @P0 BRA 0x870 ; /* 0x000000c000000947 */
/* 0x001fea0003800000 */
/*07b0*/ ISETP.NE.AND P0, PT, R17, R16, PT ; /* 0x000000101100720c */
/* 0x000fe20003f05270 */
/*07c0*/ IMAD.MOV.U32 R14, RZ, RZ, 0x0 ; /* 0x00000000ff0e7424 */
/* 0x000fe400078e00ff */
/*07d0*/ IMAD.MOV.U32 R15, RZ, RZ, -0x80000 ; /* 0xfff80000ff0f7424 */
/* 0x000fd400078e00ff */
/*07e0*/ @!P0 BRA 0x8c0 ; /* 0x000000d000008947 */
/* 0x000fea0003800000 */
/*07f0*/ ISETP.NE.AND P0, PT, R17, 0x7ff00000, PT ; /* 0x7ff000001100780c */
/* 0x000fe40003f05270 */
/*0800*/ LOP3.LUT R15, R9, 0x80000000, R7, 0x48, !PT ; /* 0x80000000090f7812 */
/* 0x000fe400078e4807 */
/*0810*/ ISETP.EQ.OR P0, PT, R16, RZ, !P0 ; /* 0x000000ff1000720c */
/* 0x000fda0004702670 */
/*0820*/ @P0 LOP3.LUT R4, R15, 0x7ff00000, RZ, 0xfc, !PT ; /* 0x7ff000000f040812 */
/* 0x000fe200078efcff */
/*0830*/ @!P0 IMAD.MOV.U32 R14, RZ, RZ, RZ ; /* 0x000000ffff0e8224 */
/* 0x000fe400078e00ff */
/*0840*/ @P0 IMAD.MOV.U32 R14, RZ, RZ, RZ ; /* 0x000000ffff0e0224 */
/* 0x000fe400078e00ff */
/*0850*/ @P0 IMAD.MOV.U32 R15, RZ, RZ, R4 ; /* 0x000000ffff0f0224 */
/* 0x000fe200078e0004 */
/*0860*/ BRA 0x8c0 ; /* 0x0000005000007947 */
/* 0x000fea0003800000 */
/*0870*/ LOP3.LUT R15, R7, 0x80000, RZ, 0xfc, !PT ; /* 0x00080000070f7812 */
/* 0x000fe200078efcff */
/*0880*/ IMAD.MOV.U32 R14, RZ, RZ, R6 ; /* 0x000000ffff0e7224 */
/* 0x000fe200078e0006 */
/*0890*/ BRA 0x8c0 ; /* 0x0000002000007947 */
/* 0x000fea0003800000 */
/*08a0*/ LOP3.LUT R15, R9, 0x80000, RZ, 0xfc, !PT ; /* 0x00080000090f7812 */
/* 0x000fe200078efcff */
/*08b0*/ IMAD.MOV.U32 R14, RZ, RZ, R8 ; /* 0x000000ffff0e7224 */
/* 0x000fe400078e0008 */
/*08c0*/ IMAD.MOV.U32 R4, RZ, RZ, R0 ; /* 0x000000ffff047224 */
/* 0x000fe400078e0000 */
/*08d0*/ IMAD.MOV.U32 R5, RZ, RZ, 0x0 ; /* 0x00000000ff057424 */
/* 0x000fc800078e00ff */
/*08e0*/ RET.REL.NODEC R4 0x0 ; /* 0xfffff71004007950 */
/* 0x000fea0003c3ffff */
/*08f0*/ BRA 0x8f0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0900*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0910*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0920*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0930*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0940*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0950*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0960*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0970*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
// Fills row `row_index` of the Gamma matrix. Launch layout: one element per
// block, with blockIdx.x as the column position l (a threadIdx.x variant is
// commented out). l is not bounds-checked, so the grid size must equal the
// row length — assumed guaranteed by the caller; verify at the launch site.
//
// Parameters:
//   row_index    - lookup position in `indices` for this row.
//   vertex_index - columns whose mapped index is < vertex_index take the
//                  (N * exp_V - delta) / (exp_V - 1) branch; others read G.
//   indices      - translation table from positions to matrix indices.
//   exp_V        - per-column factors (divisor exp_V[l] - 1. must be nonzero).
//   N_ptr/LD_N   - matrix N and its leading dimension (index i + LD_N * j).
//   G_ptr/LD_G   - matrix G and its leading dimension.
//   row_ptr/incr - strided output row.
__global__ void compute_row_on_Gamma_matrix_kernel(int row_index, int vertex_index, int* indices, double* exp_V, double* N_ptr, int LD_N, double* G_ptr, int LD_G, double* row_ptr, int incr) {
// int l = threadIdx.x;
int l = blockIdx.x;
int i_index, j_index;
double delta;
i_index = indices[row_index];
j_index = indices[l];
if (j_index < vertex_index) {
// delta is 1 exactly on the diagonal element of this row.
delta = i_index == j_index ? 1 : 0;
row_ptr[l * incr] = (N_ptr[i_index + LD_N * j_index] * exp_V[l] - delta) / (exp_V[l] - 1.);
}
else
// Past the split point the value comes from G, with the column offset
// rebased by vertex_index.
row_ptr[l * incr] = G_ptr[i_index + LD_G * (j_index - vertex_index)];
}
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z70__device_stub__Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_iiiPiPdS0_iS0_iS0_i
.type _Z70__device_stub__Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_iiiPiPdS0_iS0_iS0_i, @function
_Z70__device_stub__Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_iiiPiPdS0_iS0_iS0_i:
.LFB2051:
.cfi_startproc
endbr64
subq $232, %rsp
.cfi_def_cfa_offset 240
movl %edi, 60(%rsp)
movl %esi, 56(%rsp)
movq %rdx, 48(%rsp)
movq %rcx, 40(%rsp)
movq %r8, 32(%rsp)
movl %r9d, 28(%rsp)
movq 240(%rsp), %rax
movq %rax, 16(%rsp)
movq 256(%rsp), %rax
movq %rax, 8(%rsp)
movq %fs:40, %rax
movq %rax, 216(%rsp)
xorl %eax, %eax
leaq 60(%rsp), %rax
movq %rax, 128(%rsp)
leaq 56(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rax
movq %rax, 144(%rsp)
leaq 40(%rsp), %rax
movq %rax, 152(%rsp)
leaq 32(%rsp), %rax
movq %rax, 160(%rsp)
leaq 28(%rsp), %rax
movq %rax, 168(%rsp)
leaq 16(%rsp), %rax
movq %rax, 176(%rsp)
leaq 248(%rsp), %rax
movq %rax, 184(%rsp)
leaq 8(%rsp), %rax
movq %rax, 192(%rsp)
leaq 264(%rsp), %rax
movq %rax, 200(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
leaq 72(%rsp), %rcx
leaq 64(%rsp), %rdx
leaq 92(%rsp), %rsi
leaq 80(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 216(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $232, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 72(%rsp)
.cfi_def_cfa_offset 248
pushq 72(%rsp)
.cfi_def_cfa_offset 256
leaq 144(%rsp), %r9
movq 108(%rsp), %rcx
movl 116(%rsp), %r8d
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
leaq _Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 240
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z70__device_stub__Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_iiiPiPdS0_iS0_iS0_i, .-_Z70__device_stub__Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_iiiPiPdS0_iS0_iS0_i
.globl _Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i
.type _Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i, @function
_Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl 40(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 24
pushq 40(%rsp)
.cfi_def_cfa_offset 32
movl 40(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 40
pushq 40(%rsp)
.cfi_def_cfa_offset 48
call _Z70__device_stub__Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_iiiPiPdS0_iS0_iS0_i
addq $40, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i, .-_Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
// One-row Gamma-matrix kernel: each block writes a single element of the
// output row, selected by blockIdx.x (the original threadIdx.x indexing is
// left commented out). No guard on l — grid size must match the row length;
// TODO confirm against the launcher.
//
//   row_index    : entry of `indices` naming this row's matrix index.
//   vertex_index : threshold separating the N-based branch (mapped index
//                  below it) from the G copy branch.
//   indices      : position-to-index map shared by rows and columns.
//   exp_V        : factors per column; a value of exactly 1 makes the
//                  divisor zero — presumably ruled out upstream.
//   N_ptr, LD_N  : matrix N, addressed as N_ptr[i + LD_N * j].
//   G_ptr, LD_G  : matrix G, addressed the same way with LD_G.
//   row_ptr,incr : destination row written at stride incr.
__global__ void compute_row_on_Gamma_matrix_kernel(int row_index, int vertex_index, int* indices, double* exp_V, double* N_ptr, int LD_N, double* G_ptr, int LD_G, double* row_ptr, int incr) {
// int l = threadIdx.x;
int l = blockIdx.x;
int i_index, j_index;
double delta;
i_index = indices[row_index];
j_index = indices[l];
if (j_index < vertex_index) {
// Kronecker delta: 1 when the element lies on the diagonal.
delta = i_index == j_index ? 1 : 0;
row_ptr[l * incr] = (N_ptr[i_index + LD_N * j_index] * exp_V[l] - delta) / (exp_V[l] - 1.);
}
else
// Otherwise pull the value from G, rebasing the column by vertex_index.
row_ptr[l * incr] = G_ptr[i_index + LD_G * (j_index - vertex_index)];
}
#include "includes.h"
// HIP port of the Gamma-matrix row kernel; source-identical to the CUDA
// version since only built-in index variables are used. blockIdx.x picks the
// column position l (threadIdx.x alternative commented out); the grid must
// be sized to the row length because l is unchecked.
//
//   row_index    : `indices` slot giving the row's matrix index.
//   vertex_index : mapped indices below this use the N/exp_V expression,
//                  the rest are copied from G.
//   indices      : position -> matrix-index table.
//   exp_V        : column factors; exp_V[l] == 1 would divide by zero —
//                  assumed excluded by the caller.
//   N_ptr, LD_N / G_ptr, LD_G : matrices with leading-dimension indexing.
//   row_ptr, incr             : strided output row.
__global__ void compute_row_on_Gamma_matrix_kernel(int row_index, int vertex_index, int* indices, double* exp_V, double* N_ptr, int LD_N, double* G_ptr, int LD_G, double* row_ptr, int incr) {
// int l = threadIdx.x;
int l = blockIdx.x;
int i_index, j_index;
double delta;
i_index = indices[row_index];
j_index = indices[l];
if (j_index < vertex_index) {
// 1 on the diagonal, 0 elsewhere (Kronecker delta).
delta = i_index == j_index ? 1 : 0;
row_ptr[l * incr] = (N_ptr[i_index + LD_N * j_index] * exp_V[l] - delta) / (exp_V[l] - 1.);
}
else
// G's columns start at j_index == vertex_index, hence the subtraction.
row_ptr[l * incr] = G_ptr[i_index + LD_G * (j_index - vertex_index)];
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
// Gamma-matrix row kernel (HIP). Each block computes one element; the
// column position l comes from blockIdx.x (a per-thread threadIdx.x variant
// is commented out). l carries no bounds check, so grid size must equal the
// row length — verify at the launch site.
//
// Arguments:
//   row_index    - index into `indices` selecting this row.
//   vertex_index - split: mapped indices below it use the first branch,
//                  the remainder read from G.
//   indices      - lookup table mapping positions to matrix indices.
//   exp_V        - per-column factors; the divisor exp_V[l] - 1. must not
//                  be zero (presumably guaranteed upstream).
//   N_ptr, LD_N  - matrix N accessed as N_ptr[i + LD_N * j].
//   G_ptr, LD_G  - matrix G accessed likewise with LD_G.
//   row_ptr,incr - output row written at stride incr.
__global__ void compute_row_on_Gamma_matrix_kernel(int row_index, int vertex_index, int* indices, double* exp_V, double* N_ptr, int LD_N, double* G_ptr, int LD_G, double* row_ptr, int incr) {
// int l = threadIdx.x;
int l = blockIdx.x;
int i_index, j_index;
double delta;
i_index = indices[row_index];
j_index = indices[l];
if (j_index < vertex_index) {
// Kronecker delta for the diagonal element.
delta = i_index == j_index ? 1 : 0;
row_ptr[l * incr] = (N_ptr[i_index + LD_N * j_index] * exp_V[l] - delta) / (exp_V[l] - 1.);
}
else
// Copy from G; G's column numbering starts at vertex_index.
row_ptr[l * incr] = G_ptr[i_index + LD_G * (j_index - vertex_index)];
}
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i
.globl _Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i
.p2align 8
.type _Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i,@function
_Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i:
s_load_b128 s[4:7], s[0:1], 0x0
s_mov_b32 s2, s15
s_waitcnt lgkmcnt(0)
s_ashr_i32 s9, s4, 31
s_mov_b32 s8, s4
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_lshl_b64 s[8:9], s[8:9], 2
s_add_u32 s8, s6, s8
s_addc_u32 s9, s7, s9
s_ashr_i32 s3, s15, 31
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_lshl_b64 s[10:11], s[2:3], 2
s_add_u32 s6, s6, s10
s_addc_u32 s7, s7, s11
s_clause 0x1
s_load_b32 s7, s[6:7], 0x0
s_load_b32 s6, s[8:9], 0x0
s_waitcnt lgkmcnt(0)
s_cmp_ge_i32 s7, s5
s_cbranch_scc0 .LBB0_3
s_clause 0x1
s_load_b32 s4, s[0:1], 0x30
s_load_b64 s[8:9], s[0:1], 0x28
s_sub_i32 s5, s7, s5
s_waitcnt lgkmcnt(0)
s_mul_i32 s4, s5, s4
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_add_i32 s4, s4, s6
s_ashr_i32 s5, s4, 31
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_lshl_b64 s[4:5], s[4:5], 3
s_add_u32 s4, s8, s4
s_addc_u32 s5, s9, s5
s_load_b64 s[4:5], s[4:5], 0x0
s_cbranch_execz .LBB0_4
s_waitcnt lgkmcnt(0)
v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
s_branch .LBB0_5
.LBB0_3:
.LBB0_4:
s_waitcnt lgkmcnt(0)
s_clause 0x1
s_load_b32 s4, s[0:1], 0x20
s_load_b128 s[8:11], s[0:1], 0x10
s_cmp_eq_u32 s6, s7
s_cselect_b32 s12, 0x3ff00000, 0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s12
s_waitcnt lgkmcnt(0)
s_mul_i32 s4, s7, s4
s_add_i32 s4, s4, s6
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_ashr_i32 s5, s4, 31
s_lshl_b64 s[4:5], s[4:5], 3
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
s_add_u32 s4, s10, s4
s_addc_u32 s5, s11, s5
s_lshl_b64 s[6:7], s[2:3], 3
s_add_u32 s6, s8, s6
s_addc_u32 s7, s9, s7
s_load_b64 s[4:5], s[4:5], 0x0
s_load_b64 s[6:7], s[6:7], 0x0
s_waitcnt lgkmcnt(0)
v_fma_f64 v[0:1], s[4:5], s[6:7], -v[0:1]
v_add_f64 v[2:3], s[6:7], -1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_scale_f64 v[4:5], null, v[2:3], v[2:3], v[0:1]
v_rcp_f64_e32 v[6:7], v[4:5]
s_waitcnt_depctr 0xfff
v_fma_f64 v[8:9], -v[4:5], v[6:7], 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[6:7], v[6:7], v[8:9], v[6:7]
v_fma_f64 v[8:9], -v[4:5], v[6:7], 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_fma_f64 v[6:7], v[6:7], v[8:9], v[6:7]
v_div_scale_f64 v[8:9], vcc_lo, v[0:1], v[2:3], v[0:1]
v_mul_f64 v[10:11], v[8:9], v[6:7]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[4:5], -v[4:5], v[10:11], v[8:9]
v_div_fmas_f64 v[4:5], v[4:5], v[6:7], v[10:11]
s_delay_alu instid0(VALU_DEP_1)
v_div_fixup_f64 v[0:1], v[4:5], v[2:3], v[0:1]
.LBB0_5:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x40
s_load_b64 s[0:1], s[0:1], 0x38
v_mov_b32_e32 v2, 0
s_waitcnt lgkmcnt(0)
s_mul_i32 s2, s2, s3
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_ashr_i32 s3, s2, 31
s_lshl_b64 s[2:3], s[2:3], 3
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
global_store_b64 v2, v[0:1], s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 68
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 12
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i, .Lfunc_end0-_Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
- .offset: 48
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 56
.size: 8
.value_kind: global_buffer
- .offset: 64
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 68
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 12
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
// Computes a single Gamma-matrix row element per block (blockIdx.x = column
// position l; the threadIdx.x form is commented out). l is unguarded, so the
// launch grid must exactly cover the row — TODO confirm at the call site.
//
//   row_index    : slot in `indices` naming this row's matrix index.
//   vertex_index : mapped column indices below this threshold take the
//                  (N * exp_V - delta) / (exp_V - 1) branch; others copy G.
//   indices      : position-to-matrix-index map.
//   exp_V        : per-column factors; exp_V[l] == 1 would make the divisor
//                  zero — assumed excluded by the caller.
//   N_ptr, LD_N  : matrix N with leading dimension LD_N.
//   G_ptr, LD_G  : matrix G with leading dimension LD_G.
//   row_ptr,incr : strided destination row.
__global__ void compute_row_on_Gamma_matrix_kernel(int row_index, int vertex_index, int* indices, double* exp_V, double* N_ptr, int LD_N, double* G_ptr, int LD_G, double* row_ptr, int incr) {
// int l = threadIdx.x;
int l = blockIdx.x;
int i_index, j_index;
double delta;
i_index = indices[row_index];
j_index = indices[l];
if (j_index < vertex_index) {
// delta = Kronecker delta of (i_index, j_index).
delta = i_index == j_index ? 1 : 0;
row_ptr[l * incr] = (N_ptr[i_index + LD_N * j_index] * exp_V[l] - delta) / (exp_V[l] - 1.);
}
else
// Value taken from G, with the column rebased past vertex_index.
row_ptr[l * incr] = G_ptr[i_index + LD_G * (j_index - vertex_index)];
}
.file "compute_row_on_Gamma_matrix_kernel.hip"
.globl _Z49__device_stub__compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i # -- Begin function _Z49__device_stub__compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i
.p2align 4, 0x90
.type _Z49__device_stub__compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i,@function
_Z49__device_stub__compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i: # @_Z49__device_stub__compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i
.cfi_startproc
# %bb.0:
subq $184, %rsp
.cfi_def_cfa_offset 192
movl %edi, 20(%rsp)
movl %esi, 16(%rsp)
movq %rdx, 88(%rsp)
movq %rcx, 80(%rsp)
movq %r8, 72(%rsp)
movl %r9d, 12(%rsp)
leaq 20(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 88(%rsp), %rax
movq %rax, 112(%rsp)
leaq 80(%rsp), %rax
movq %rax, 120(%rsp)
leaq 72(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 192(%rsp), %rax
movq %rax, 144(%rsp)
leaq 200(%rsp), %rax
movq %rax, 152(%rsp)
leaq 208(%rsp), %rax
movq %rax, 160(%rsp)
leaq 216(%rsp), %rax
movq %rax, 168(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $200, %rsp
.cfi_adjust_cfa_offset -200
retq
.Lfunc_end0:
.size _Z49__device_stub__compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i, .Lfunc_end0-_Z49__device_stub__compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i,@object # @_Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i
.section .rodata,"a",@progbits
.globl _Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i
.p2align 3, 0x0
_Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i:
.quad _Z49__device_stub__compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i
.size _Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i"
.size .L__unnamed_1, 57
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z49__device_stub__compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R9, SR_CTAID.X ; /* 0x0000000000097919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R4, RZ, RZ, 0x4 ; /* 0x00000004ff047424 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0040*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x160] ; /* 0x00005800ff057624 */
/* 0x000fe400078e00ff */
/*0050*/ IMAD.WIDE R6, R9, R4, c[0x0][0x168] ; /* 0x00005a0009067625 */
/* 0x001fca00078e0204 */
/*0060*/ LDG.E R0, [R6.64] ; /* 0x0000000406007981 */
/* 0x000ea2000c1e1900 */
/*0070*/ IMAD.WIDE R4, R4, R5, c[0x0][0x168] ; /* 0x00005a0004047625 */
/* 0x000fca00078e0205 */
/*0080*/ LDG.E R15, [R4.64] ; /* 0x00000004040f7981 */
/* 0x000162000c1e1900 */
/*0090*/ IMAD.MOV.U32 R11, RZ, RZ, 0x8 ; /* 0x00000008ff0b7424 */
/* 0x000fe200078e00ff */
/*00a0*/ SHF.R.S32.HI R10, RZ, 0x1f, R9 ; /* 0x0000001fff0a7819 */
/* 0x000fe20000011409 */
/*00b0*/ IMAD R2, R9, c[0x0][0x1a0], RZ ; /* 0x0000680009027a24 */
/* 0x000fc800078e02ff */
/*00c0*/ IMAD.WIDE R2, R2, R11, c[0x0][0x198] ; /* 0x0000660002027625 */
/* 0x000fe200078e020b */
/*00d0*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x164], PT ; /* 0x0000590000007a0c */
/* 0x004fda0003f06270 */
/*00e0*/ @!P0 BRA 0x150 ; /* 0x0000006000008947 */
/* 0x000fea0003800000 */
/*00f0*/ IADD3 R0, R0, -c[0x0][0x164], RZ ; /* 0x8000590000007a10 */
/* 0x001fca0007ffe0ff */
/*0100*/ IMAD R0, R0, c[0x0][0x190], R15 ; /* 0x0000640000007a24 */
/* 0x020fc800078e020f */
/*0110*/ IMAD.WIDE R4, R0, R11, c[0x0][0x188] ; /* 0x0000620000047625 */
/* 0x000fcc00078e020b */
/*0120*/ LDG.E.64 R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1b00 */
/*0130*/ STG.E.64 [R2.64], R4 ; /* 0x0000000402007986 */
/* 0x004fe2000c101b04 */
/*0140*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0150*/ LEA R8, P0, R9, c[0x0][0x170], 0x3 ; /* 0x00005c0009087a11 */
/* 0x001fc800078018ff */
/*0160*/ LEA.HI.X R9, R9, c[0x0][0x174], R10, 0x3, P0 ; /* 0x00005d0009097a11 */
/* 0x000fcc00000f1c0a */
/*0170*/ LDG.E.64 R8, [R8.64] ; /* 0x0000000408087981 */
/* 0x000ea2000c1e1b00 */
/*0180*/ IMAD R4, R0, c[0x0][0x180], R15 ; /* 0x0000600000047a24 */
/* 0x020fc800078e020f */
/*0190*/ IMAD.WIDE R4, R4, R11, c[0x0][0x178] ; /* 0x00005e0004047625 */
/* 0x000fcc00078e020b */
/*01a0*/ LDG.E.64 R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ee2000c1e1b00 */
/*01b0*/ IMAD.MOV.U32 R10, RZ, RZ, 0x1 ; /* 0x00000001ff0a7424 */
/* 0x000fe200078e00ff */
/*01c0*/ ISETP.NE.AND P0, PT, R15, R0, PT ; /* 0x000000000f00720c */
/* 0x000fe20003f05270 */
/*01d0*/ IMAD.MOV.U32 R14, RZ, RZ, RZ ; /* 0x000000ffff0e7224 */
/* 0x000fc600078e00ff */
/*01e0*/ FSEL R15, RZ, 1.875, P0 ; /* 0x3ff00000ff0f7808 */
/* 0x000fe20000000000 */
/*01f0*/ DADD R6, R8, -1 ; /* 0xbff0000008067429 */
/* 0x004e0c0000000000 */
/*0200*/ MUFU.RCP64H R11, R7 ; /* 0x00000007000b7308 */
/* 0x001e220000001800 */
/*0210*/ DFMA R8, R8, R4, -R14 ; /* 0x000000040808722b */
/* 0x008e54000000080e */
/*0220*/ FSETP.GEU.AND P1, PT, |R9|, 6.5827683646048100446e-37, PT ; /* 0x036000000900780b */
/* 0x002fe20003f2e200 */
/*0230*/ DFMA R12, -R6, R10, 1 ; /* 0x3ff00000060c742b */
/* 0x001e0c000000010a */
/*0240*/ DFMA R12, R12, R12, R12 ; /* 0x0000000c0c0c722b */
/* 0x001e0c000000000c */
/*0250*/ DFMA R12, R10, R12, R10 ; /* 0x0000000c0a0c722b */
/* 0x001e0c000000000a */
/*0260*/ DFMA R10, -R6, R12, 1 ; /* 0x3ff00000060a742b */
/* 0x001e0c000000010c */
/*0270*/ DFMA R10, R12, R10, R12 ; /* 0x0000000a0c0a722b */
/* 0x001e0c000000000c */
/*0280*/ DMUL R4, R8, R10 ; /* 0x0000000a08047228 */
/* 0x001e0c0000000000 */
/*0290*/ DFMA R12, -R6, R4, R8 ; /* 0x00000004060c722b */
/* 0x001e0c0000000108 */
/*02a0*/ DFMA R4, R10, R12, R4 ; /* 0x0000000c0a04722b */
/* 0x001e140000000004 */
/*02b0*/ FFMA R0, RZ, R7, R5 ; /* 0x00000007ff007223 */
/* 0x001fca0000000005 */
/*02c0*/ FSETP.GT.AND P0, PT, |R0|, 1.469367938527859385e-39, PT ; /* 0x001000000000780b */
/* 0x000fda0003f04200 */
/*02d0*/ @P0 BRA P1, 0x320 ; /* 0x0000004000000947 */
/* 0x000fea0000800000 */
/*02e0*/ MOV R0, 0x300 ; /* 0x0000030000007802 */
/* 0x000fe40000000f00 */
/*02f0*/ CALL.REL.NOINC 0x340 ; /* 0x0000004000007944 */
/* 0x000fea0003c00000 */
/*0300*/ IMAD.MOV.U32 R4, RZ, RZ, R14 ; /* 0x000000ffff047224 */
/* 0x000fe400078e000e */
/*0310*/ IMAD.MOV.U32 R5, RZ, RZ, R15 ; /* 0x000000ffff057224 */
/* 0x000fca00078e000f */
/*0320*/ STG.E.64 [R2.64], R4 ; /* 0x0000000402007986 */
/* 0x000fe2000c101b04 */
/*0330*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0340*/ FSETP.GEU.AND P0, PT, |R7|.reuse, 1.469367938527859385e-39, PT ; /* 0x001000000700780b */
/* 0x040fe20003f0e200 */
/*0350*/ IMAD.MOV.U32 R10, RZ, RZ, 0x1 ; /* 0x00000001ff0a7424 */
/* 0x000fe200078e00ff */
/*0360*/ LOP3.LUT R4, R7.reuse, 0x800fffff, RZ, 0xc0, !PT ; /* 0x800fffff07047812 */
/* 0x040fe200078ec0ff */
/*0370*/ IMAD.MOV.U32 R15, RZ, RZ, 0x1ca00000 ; /* 0x1ca00000ff0f7424 */
/* 0x000fe200078e00ff */
/*0380*/ LOP3.LUT R16, R7, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff0000007107812 */
/* 0x000fe400078ec0ff */
/*0390*/ LOP3.LUT R5, R4, 0x3ff00000, RZ, 0xfc, !PT ; /* 0x3ff0000004057812 */
/* 0x000fe200078efcff */
/*03a0*/ IMAD.MOV.U32 R4, RZ, RZ, R6 ; /* 0x000000ffff047224 */
/* 0x000fe200078e0006 */
/*03b0*/ LOP3.LUT R14, R9, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff00000090e7812 */
/* 0x000fc800078ec0ff */
/*03c0*/ ISETP.GE.U32.AND P1, PT, R14, R16, PT ; /* 0x000000100e00720c */
/* 0x000fe20003f26070 */
/*03d0*/ @!P0 DMUL R4, R6, 8.98846567431157953865e+307 ; /* 0x7fe0000006048828 */
/* 0x000e060000000000 */
/*03e0*/ SEL R17, R15, 0x63400000, !P1 ; /* 0x634000000f117807 */
/* 0x000fe40004800000 */
/*03f0*/ FSETP.GEU.AND P1, PT, |R9|, 1.469367938527859385e-39, PT ; /* 0x001000000900780b */
/* 0x000fe20003f2e200 */
/*0400*/ MUFU.RCP64H R11, R5 ; /* 0x00000005000b7308 */
/* 0x001e280000001800 */
/*0410*/ @!P0 LOP3.LUT R16, R5, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff0000005108812 */
/* 0x000fe200078ec0ff */
/*0420*/ DFMA R12, R10, -R4, 1 ; /* 0x3ff000000a0c742b */
/* 0x001e0c0000000804 */
/*0430*/ DFMA R12, R12, R12, R12 ; /* 0x0000000c0c0c722b */
/* 0x001e0c000000000c */
/*0440*/ DFMA R12, R10, R12, R10 ; /* 0x0000000c0a0c722b */
/* 0x001e0c000000000a */
/*0450*/ DFMA R10, R12, -R4, 1 ; /* 0x3ff000000c0a742b */
/* 0x001e0c0000000804 */
/*0460*/ DFMA R12, R12, R10, R12 ; /* 0x0000000a0c0c722b */
/* 0x001064000000000c */
/*0470*/ LOP3.LUT R11, R17, 0x800fffff, R9, 0xf8, !PT ; /* 0x800fffff110b7812 */
/* 0x001fe200078ef809 */
/*0480*/ IMAD.MOV.U32 R17, RZ, RZ, R14 ; /* 0x000000ffff117224 */
/* 0x000fe400078e000e */
/*0490*/ IMAD.MOV.U32 R10, RZ, RZ, R8 ; /* 0x000000ffff0a7224 */
/* 0x000fe200078e0008 */
/*04a0*/ @P1 BRA 0x530 ; /* 0x0000008000001947 */
/* 0x000fea0003800000 */
/*04b0*/ LOP3.LUT R17, R7, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff0000007117812 */
/* 0x002fe200078ec0ff */
/*04c0*/ IMAD.MOV.U32 R18, RZ, RZ, RZ ; /* 0x000000ffff127224 */
/* 0x000fc600078e00ff */
/*04d0*/ ISETP.GE.U32.AND P0, PT, R14, R17, PT ; /* 0x000000110e00720c */
/* 0x000fc80003f06070 */
/*04e0*/ SEL R17, R15, 0x63400000, !P0 ; /* 0x634000000f117807 */
/* 0x000fc80004000000 */
/*04f0*/ LOP3.LUT R17, R17, 0x80000000, R9, 0xf8, !PT ; /* 0x8000000011117812 */
/* 0x000fc800078ef809 */
/*0500*/ LOP3.LUT R19, R17, 0x100000, RZ, 0xfc, !PT ; /* 0x0010000011137812 */
/* 0x000fcc00078efcff */
/*0510*/ DFMA R10, R10, 2, -R18 ; /* 0x400000000a0a782b */
/* 0x000e140000000812 */
/*0520*/ LOP3.LUT R17, R11, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff000000b117812 */
/* 0x001fc800078ec0ff */
/*0530*/ IADD3 R20, R17, -0x1, RZ ; /* 0xffffffff11147810 */
/* 0x002fe20007ffe0ff */
/*0540*/ DMUL R18, R12, R10 ; /* 0x0000000a0c127228 */
/* 0x000e220000000000 */
/*0550*/ IADD3 R22, R16, -0x1, RZ ; /* 0xffffffff10167810 */
/* 0x000fe40007ffe0ff */
/*0560*/ ISETP.GT.U32.AND P0, PT, R20, 0x7feffffe, PT ; /* 0x7feffffe1400780c */
/* 0x000fc60003f04070 */
/*0570*/ DFMA R20, R18, -R4, R10 ; /* 0x800000041214722b */
/* 0x001e22000000000a */
/*0580*/ ISETP.GT.U32.OR P0, PT, R22, 0x7feffffe, P0 ; /* 0x7feffffe1600780c */
/* 0x000fca0000704470 */
/*0590*/ DFMA R12, R12, R20, R18 ; /* 0x000000140c0c722b */
/* 0x0010500000000012 */
/*05a0*/ @P0 BRA 0x770 ; /* 0x000001c000000947 */
/* 0x000fea0003800000 */
/*05b0*/ LOP3.LUT R9, R7, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff0000007097812 */
/* 0x003fc800078ec0ff */
/*05c0*/ ISETP.GE.U32.AND P0, PT, R14.reuse, R9, PT ; /* 0x000000090e00720c */
/* 0x040fe20003f06070 */
/*05d0*/ IMAD.IADD R8, R14, 0x1, -R9 ; /* 0x000000010e087824 */
/* 0x000fc600078e0a09 */
/*05e0*/ SEL R15, R15, 0x63400000, !P0 ; /* 0x634000000f0f7807 */
/* 0x000fe40004000000 */
/*05f0*/ IMNMX R8, R8, -0x46a00000, !PT ; /* 0xb960000008087817 */
/* 0x000fc80007800200 */
/*0600*/ IMNMX R8, R8, 0x46a00000, PT ; /* 0x46a0000008087817 */
/* 0x000fca0003800200 */
/*0610*/ IMAD.IADD R16, R8, 0x1, -R15 ; /* 0x0000000108107824 */
/* 0x000fe400078e0a0f */
/*0620*/ IMAD.MOV.U32 R8, RZ, RZ, RZ ; /* 0x000000ffff087224 */
/* 0x000fc600078e00ff */
/*0630*/ IADD3 R9, R16, 0x7fe00000, RZ ; /* 0x7fe0000010097810 */
/* 0x000fcc0007ffe0ff */
/*0640*/ DMUL R14, R12, R8 ; /* 0x000000080c0e7228 */
/* 0x000e140000000000 */
/*0650*/ FSETP.GTU.AND P0, PT, |R15|, 1.469367938527859385e-39, PT ; /* 0x001000000f00780b */
/* 0x001fda0003f0c200 */
/*0660*/ @P0 BRA 0x8c0 ; /* 0x0000025000000947 */
/* 0x000fea0003800000 */
/*0670*/ DFMA R4, R12, -R4, R10 ; /* 0x800000040c04722b */
/* 0x000e22000000000a */
/*0680*/ IMAD.MOV.U32 R8, RZ, RZ, RZ ; /* 0x000000ffff087224 */
/* 0x000fd200078e00ff */
/*0690*/ FSETP.NEU.AND P0, PT, R5.reuse, RZ, PT ; /* 0x000000ff0500720b */
/* 0x041fe40003f0d000 */
/*06a0*/ LOP3.LUT R7, R5, 0x80000000, R7, 0x48, !PT ; /* 0x8000000005077812 */
/* 0x000fc800078e4807 */
/*06b0*/ LOP3.LUT R9, R7, R9, RZ, 0xfc, !PT ; /* 0x0000000907097212 */
/* 0x000fce00078efcff */
/*06c0*/ @!P0 BRA 0x8c0 ; /* 0x000001f000008947 */
/* 0x000fea0003800000 */
/*06d0*/ IMAD.MOV R5, RZ, RZ, -R16 ; /* 0x000000ffff057224 */
/* 0x000fe200078e0a10 */
/*06e0*/ DMUL.RP R8, R12, R8 ; /* 0x000000080c087228 */
/* 0x000e220000008000 */
/*06f0*/ IMAD.MOV.U32 R4, RZ, RZ, RZ ; /* 0x000000ffff047224 */
/* 0x000fcc00078e00ff */
/*0700*/ DFMA R4, R14, -R4, R12 ; /* 0x800000040e04722b */
/* 0x000e46000000000c */
/*0710*/ LOP3.LUT R7, R9, R7, RZ, 0x3c, !PT ; /* 0x0000000709077212 */
/* 0x001fc600078e3cff */
/*0720*/ IADD3 R4, -R16, -0x43300000, RZ ; /* 0xbcd0000010047810 */
/* 0x002fc80007ffe1ff */
/*0730*/ FSETP.NEU.AND P0, PT, |R5|, R4, PT ; /* 0x000000040500720b */
/* 0x000fc80003f0d200 */
/*0740*/ FSEL R14, R8, R14, !P0 ; /* 0x0000000e080e7208 */
/* 0x000fe40004000000 */
/*0750*/ FSEL R15, R7, R15, !P0 ; /* 0x0000000f070f7208 */
/* 0x000fe20004000000 */
/*0760*/ BRA 0x8c0 ; /* 0x0000015000007947 */
/* 0x000fea0003800000 */
/*0770*/ DSETP.NAN.AND P0, PT, R8, R8, PT ; /* 0x000000080800722a */
/* 0x003e1c0003f08000 */
/*0780*/ @P0 BRA 0x8a0 ; /* 0x0000011000000947 */
/* 0x001fea0003800000 */
/*0790*/ DSETP.NAN.AND P0, PT, R6, R6, PT ; /* 0x000000060600722a */
/* 0x000e1c0003f08000 */
/*07a0*/ @P0 BRA 0x870 ; /* 0x000000c000000947 */
/* 0x001fea0003800000 */
/*07b0*/ ISETP.NE.AND P0, PT, R17, R16, PT ; /* 0x000000101100720c */
/* 0x000fe20003f05270 */
/*07c0*/ IMAD.MOV.U32 R14, RZ, RZ, 0x0 ; /* 0x00000000ff0e7424 */
/* 0x000fe400078e00ff */
/*07d0*/ IMAD.MOV.U32 R15, RZ, RZ, -0x80000 ; /* 0xfff80000ff0f7424 */
/* 0x000fd400078e00ff */
/*07e0*/ @!P0 BRA 0x8c0 ; /* 0x000000d000008947 */
/* 0x000fea0003800000 */
/*07f0*/ ISETP.NE.AND P0, PT, R17, 0x7ff00000, PT ; /* 0x7ff000001100780c */
/* 0x000fe40003f05270 */
/*0800*/ LOP3.LUT R15, R9, 0x80000000, R7, 0x48, !PT ; /* 0x80000000090f7812 */
/* 0x000fe400078e4807 */
/*0810*/ ISETP.EQ.OR P0, PT, R16, RZ, !P0 ; /* 0x000000ff1000720c */
/* 0x000fda0004702670 */
/*0820*/ @P0 LOP3.LUT R4, R15, 0x7ff00000, RZ, 0xfc, !PT ; /* 0x7ff000000f040812 */
/* 0x000fe200078efcff */
/*0830*/ @!P0 IMAD.MOV.U32 R14, RZ, RZ, RZ ; /* 0x000000ffff0e8224 */
/* 0x000fe400078e00ff */
/*0840*/ @P0 IMAD.MOV.U32 R14, RZ, RZ, RZ ; /* 0x000000ffff0e0224 */
/* 0x000fe400078e00ff */
/*0850*/ @P0 IMAD.MOV.U32 R15, RZ, RZ, R4 ; /* 0x000000ffff0f0224 */
/* 0x000fe200078e0004 */
/*0860*/ BRA 0x8c0 ; /* 0x0000005000007947 */
/* 0x000fea0003800000 */
/*0870*/ LOP3.LUT R15, R7, 0x80000, RZ, 0xfc, !PT ; /* 0x00080000070f7812 */
/* 0x000fe200078efcff */
/*0880*/ IMAD.MOV.U32 R14, RZ, RZ, R6 ; /* 0x000000ffff0e7224 */
/* 0x000fe200078e0006 */
/*0890*/ BRA 0x8c0 ; /* 0x0000002000007947 */
/* 0x000fea0003800000 */
/*08a0*/ LOP3.LUT R15, R9, 0x80000, RZ, 0xfc, !PT ; /* 0x00080000090f7812 */
/* 0x000fe200078efcff */
/*08b0*/ IMAD.MOV.U32 R14, RZ, RZ, R8 ; /* 0x000000ffff0e7224 */
/* 0x000fe400078e0008 */
/*08c0*/ IMAD.MOV.U32 R4, RZ, RZ, R0 ; /* 0x000000ffff047224 */
/* 0x000fe400078e0000 */
/*08d0*/ IMAD.MOV.U32 R5, RZ, RZ, 0x0 ; /* 0x00000000ff057424 */
/* 0x000fc800078e00ff */
/*08e0*/ RET.REL.NODEC R4 0x0 ; /* 0xfffff71004007950 */
/* 0x000fea0003c3ffff */
/*08f0*/ BRA 0x8f0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0900*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0910*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0920*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0930*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0940*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0950*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0960*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0970*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i
.globl _Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i
.p2align 8
.type _Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i,@function
_Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i:
s_load_b128 s[4:7], s[0:1], 0x0
s_mov_b32 s2, s15
s_waitcnt lgkmcnt(0)
s_ashr_i32 s9, s4, 31
s_mov_b32 s8, s4
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_lshl_b64 s[8:9], s[8:9], 2
s_add_u32 s8, s6, s8
s_addc_u32 s9, s7, s9
s_ashr_i32 s3, s15, 31
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_lshl_b64 s[10:11], s[2:3], 2
s_add_u32 s6, s6, s10
s_addc_u32 s7, s7, s11
s_clause 0x1
s_load_b32 s7, s[6:7], 0x0
s_load_b32 s6, s[8:9], 0x0
s_waitcnt lgkmcnt(0)
s_cmp_ge_i32 s7, s5
s_cbranch_scc0 .LBB0_3
s_clause 0x1
s_load_b32 s4, s[0:1], 0x30
s_load_b64 s[8:9], s[0:1], 0x28
s_sub_i32 s5, s7, s5
s_waitcnt lgkmcnt(0)
s_mul_i32 s4, s5, s4
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_add_i32 s4, s4, s6
s_ashr_i32 s5, s4, 31
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_lshl_b64 s[4:5], s[4:5], 3
s_add_u32 s4, s8, s4
s_addc_u32 s5, s9, s5
s_load_b64 s[4:5], s[4:5], 0x0
s_cbranch_execz .LBB0_4
s_waitcnt lgkmcnt(0)
v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
s_branch .LBB0_5
.LBB0_3:
.LBB0_4:
s_waitcnt lgkmcnt(0)
s_clause 0x1
s_load_b32 s4, s[0:1], 0x20
s_load_b128 s[8:11], s[0:1], 0x10
s_cmp_eq_u32 s6, s7
s_cselect_b32 s12, 0x3ff00000, 0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s12
s_waitcnt lgkmcnt(0)
s_mul_i32 s4, s7, s4
s_add_i32 s4, s4, s6
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_ashr_i32 s5, s4, 31
s_lshl_b64 s[4:5], s[4:5], 3
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
s_add_u32 s4, s10, s4
s_addc_u32 s5, s11, s5
s_lshl_b64 s[6:7], s[2:3], 3
s_add_u32 s6, s8, s6
s_addc_u32 s7, s9, s7
s_load_b64 s[4:5], s[4:5], 0x0
s_load_b64 s[6:7], s[6:7], 0x0
s_waitcnt lgkmcnt(0)
v_fma_f64 v[0:1], s[4:5], s[6:7], -v[0:1]
v_add_f64 v[2:3], s[6:7], -1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_scale_f64 v[4:5], null, v[2:3], v[2:3], v[0:1]
v_rcp_f64_e32 v[6:7], v[4:5]
s_waitcnt_depctr 0xfff
v_fma_f64 v[8:9], -v[4:5], v[6:7], 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[6:7], v[6:7], v[8:9], v[6:7]
v_fma_f64 v[8:9], -v[4:5], v[6:7], 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_fma_f64 v[6:7], v[6:7], v[8:9], v[6:7]
v_div_scale_f64 v[8:9], vcc_lo, v[0:1], v[2:3], v[0:1]
v_mul_f64 v[10:11], v[8:9], v[6:7]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[4:5], -v[4:5], v[10:11], v[8:9]
v_div_fmas_f64 v[4:5], v[4:5], v[6:7], v[10:11]
s_delay_alu instid0(VALU_DEP_1)
v_div_fixup_f64 v[0:1], v[4:5], v[2:3], v[0:1]
.LBB0_5:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x40
s_load_b64 s[0:1], s[0:1], 0x38
v_mov_b32_e32 v2, 0
s_waitcnt lgkmcnt(0)
s_mul_i32 s2, s2, s3
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_ashr_i32 s3, s2, 31
s_lshl_b64 s[2:3], s[2:3], 3
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
global_store_b64 v2, v[0:1], s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 68
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 12
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i, .Lfunc_end0-_Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
- .offset: 48
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 56
.size: 8
.value_kind: global_buffer
- .offset: 64
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 68
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 12
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000be8e0_00000000-6_compute_row_on_Gamma_matrix_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z70__device_stub__Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_iiiPiPdS0_iS0_iS0_i
.type _Z70__device_stub__Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_iiiPiPdS0_iS0_iS0_i, @function
_Z70__device_stub__Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_iiiPiPdS0_iS0_iS0_i:
.LFB2051:
.cfi_startproc
endbr64
subq $232, %rsp
.cfi_def_cfa_offset 240
movl %edi, 60(%rsp)
movl %esi, 56(%rsp)
movq %rdx, 48(%rsp)
movq %rcx, 40(%rsp)
movq %r8, 32(%rsp)
movl %r9d, 28(%rsp)
movq 240(%rsp), %rax
movq %rax, 16(%rsp)
movq 256(%rsp), %rax
movq %rax, 8(%rsp)
movq %fs:40, %rax
movq %rax, 216(%rsp)
xorl %eax, %eax
leaq 60(%rsp), %rax
movq %rax, 128(%rsp)
leaq 56(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rax
movq %rax, 144(%rsp)
leaq 40(%rsp), %rax
movq %rax, 152(%rsp)
leaq 32(%rsp), %rax
movq %rax, 160(%rsp)
leaq 28(%rsp), %rax
movq %rax, 168(%rsp)
leaq 16(%rsp), %rax
movq %rax, 176(%rsp)
leaq 248(%rsp), %rax
movq %rax, 184(%rsp)
leaq 8(%rsp), %rax
movq %rax, 192(%rsp)
leaq 264(%rsp), %rax
movq %rax, 200(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
leaq 72(%rsp), %rcx
leaq 64(%rsp), %rdx
leaq 92(%rsp), %rsi
leaq 80(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 216(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $232, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 72(%rsp)
.cfi_def_cfa_offset 248
pushq 72(%rsp)
.cfi_def_cfa_offset 256
leaq 144(%rsp), %r9
movq 108(%rsp), %rcx
movl 116(%rsp), %r8d
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
leaq _Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 240
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z70__device_stub__Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_iiiPiPdS0_iS0_iS0_i, .-_Z70__device_stub__Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_iiiPiPdS0_iS0_iS0_i
.globl _Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i
.type _Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i, @function
_Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl 40(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 24
pushq 40(%rsp)
.cfi_def_cfa_offset 32
movl 40(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 40
pushq 40(%rsp)
.cfi_def_cfa_offset 48
call _Z70__device_stub__Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_iiiPiPdS0_iS0_iS0_i
addq $40, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i, .-_Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "compute_row_on_Gamma_matrix_kernel.hip"
.globl _Z49__device_stub__compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i # -- Begin function _Z49__device_stub__compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i
.p2align 4, 0x90
.type _Z49__device_stub__compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i,@function
_Z49__device_stub__compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i: # @_Z49__device_stub__compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i
.cfi_startproc
# %bb.0:
subq $184, %rsp
.cfi_def_cfa_offset 192
movl %edi, 20(%rsp)
movl %esi, 16(%rsp)
movq %rdx, 88(%rsp)
movq %rcx, 80(%rsp)
movq %r8, 72(%rsp)
movl %r9d, 12(%rsp)
leaq 20(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 88(%rsp), %rax
movq %rax, 112(%rsp)
leaq 80(%rsp), %rax
movq %rax, 120(%rsp)
leaq 72(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 192(%rsp), %rax
movq %rax, 144(%rsp)
leaq 200(%rsp), %rax
movq %rax, 152(%rsp)
leaq 208(%rsp), %rax
movq %rax, 160(%rsp)
leaq 216(%rsp), %rax
movq %rax, 168(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $200, %rsp
.cfi_adjust_cfa_offset -200
retq
.Lfunc_end0:
.size _Z49__device_stub__compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i, .Lfunc_end0-_Z49__device_stub__compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i,@object # @_Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i
.section .rodata,"a",@progbits
.globl _Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i
.p2align 3, 0x0
_Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i:
.quad _Z49__device_stub__compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i
.size _Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i"
.size .L__unnamed_1, 57
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z49__device_stub__compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z34compute_row_on_Gamma_matrix_kerneliiPiPdS0_iS0_iS0_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "cuda_runtime.h"
// Writes max(threadIdx.x, 32) to a[threadIdx.x].
// Demo kernel for inspecting generated code: indexing uses threadIdx.x only,
// so it is meant for a single-block launch. No bounds check — `a` must hold
// at least blockDim.x ints.
__global__ void func1(int* a)
{
int idx = threadIdx.x;
int v;
if (idx>32)
v = idx;      // idx > 32: keep the thread index
else
v = 32;       // idx <= 32: clamp up to 32
a[idx] = v;
}
__global__ void func2(int* a)
{
int idx = threadIdx.x;
int v = a[idx]*2;
if (idx>32)
v += 1;
a[idx] = v;
}
__global__ void func3(int* a)
{
int idx = threadIdx.x;
int v;
if (idx>32)
{
v = a[idx] + 1;
}
else{
v = a[idx+1024] * 2;
}
a[idx+2048] = v;
}
/*
__global__ void func(int4 c, float* a)
{
__shared__ float S[4096];
unsigned int tix = threadIdx.x;
unsigned int bix = blockIdx.x;
unsigned int wid = tix/warpSize;
S[tix] = a[tix];
__syncthreads();
if(wid==0)
{
float v0=0.14f;
float v1=1.14f;
float v2=2.14f;
float v3=3.14f;
float va = S[1024-tix];
#pragma unroll 256
for(int i=0; i<4096; i++)
{
v0 = fmaf(v0, va, v0);
v1 = fmaf(v1, va, v1);
v2 = fmaf(v2, va, v2);
v3 = fmaf(v3, va, v3);
}
if(bix==0 && tix==0) a[0] = v0+v1+v2+v3;
}
else{
}
}
__global__ void switchTest(int* a)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int v = idx %8;
switch(v){
case 0:
a[idx] = idx;
break;
case 1:
a[idx] = idx * v;
break;
case 2:
a[idx] = idx + v;
break;
case 3:
a[idx] = idx - v;
break;
case 4:
case 5:
a[idx] = idx + 2*v;
break;
case 6:
case 7:
a[idx] = idx + v*v;
break;
default:
break;
}
}*/
int main()
{
return 0;
} | code for sm_80
Function : _Z5func3Pi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R2, SR_TID.X ; /* 0x0000000000027919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0040*/ ISETP.GT.AND P0, PT, R2, 0x20, PT ; /* 0x000000200200780c */
/* 0x001fd00003f04270 */
/*0050*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fca00078e0203 */
/*0060*/ @!P0 LDG.E R5, [R2.64+0x1000] ; /* 0x0010000402058981 */
/* 0x000ea8000c1e1900 */
/*0070*/ @P0 LDG.E R0, [R2.64] ; /* 0x0000000402000981 */
/* 0x000ee2000c1e1900 */
/*0080*/ @!P0 IMAD.SHL.U32 R5, R5, 0x2, RZ ; /* 0x0000000205058824 */
/* 0x004fe200078e00ff */
/*0090*/ @P0 IADD3 R5, R0, 0x1, RZ ; /* 0x0000000100050810 */
/* 0x008fca0007ffe0ff */
/*00a0*/ STG.E [R2.64+0x2000], R5 ; /* 0x0020000502007986 */
/* 0x000fe2000c101904 */
/*00b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z5func2Pi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R4, SR_TID.X ; /* 0x0000000000047919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE R2, R4, R3, c[0x0][0x160] ; /* 0x0000580004027625 */
/* 0x001fca00078e0203 */
/*0050*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea2000c1e1900 */
/*0060*/ ISETP.GT.AND P0, PT, R4, 0x20, PT ; /* 0x000000200400780c */
/* 0x000fc80003f04270 */
/*0070*/ SEL R4, RZ, 0x1, !P0 ; /* 0x00000001ff047807 */
/* 0x000fe20004000000 */
/*0080*/ IMAD.SHL.U32 R5, R0, 0x2, RZ ; /* 0x0000000200057824 */
/* 0x004fca00078e00ff */
/*0090*/ LOP3.LUT R5, R5, 0xfffffffe, R4, 0xe2, !PT ; /* 0xfffffffe05057812 */
/* 0x000fca00078ee204 */
/*00a0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*00b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z5func1Pi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_TID.X ; /* 0x0000000000027919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0040*/ IMNMX R5, R2, 0x20, !PT ; /* 0x0000002002057817 */
/* 0x001fd00007800200 */
/*0050*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fca00078e0203 */
/*0060*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*0070*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0080*/ BRA 0x80; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "cuda_runtime.h"
__global__ void func1(int* a)
{
int idx = threadIdx.x;
int v;
if (idx>32)
v = idx;
else
v = 32;
a[idx] = v;
}
__global__ void func2(int* a)
{
int idx = threadIdx.x;
int v = a[idx]*2;
if (idx>32)
v += 1;
a[idx] = v;
}
__global__ void func3(int* a)
{
int idx = threadIdx.x;
int v;
if (idx>32)
{
v = a[idx] + 1;
}
else{
v = a[idx+1024] * 2;
}
a[idx+2048] = v;
}
/*
__global__ void func(int4 c, float* a)
{
__shared__ float S[4096];
unsigned int tix = threadIdx.x;
unsigned int bix = blockIdx.x;
unsigned int wid = tix/warpSize;
S[tix] = a[tix];
__syncthreads();
if(wid==0)
{
float v0=0.14f;
float v1=1.14f;
float v2=2.14f;
float v3=3.14f;
float va = S[1024-tix];
#pragma unroll 256
for(int i=0; i<4096; i++)
{
v0 = fmaf(v0, va, v0);
v1 = fmaf(v1, va, v1);
v2 = fmaf(v2, va, v2);
v3 = fmaf(v3, va, v3);
}
if(bix==0 && tix==0) a[0] = v0+v1+v2+v3;
}
else{
}
}
__global__ void switchTest(int* a)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int v = idx %8;
switch(v){
case 0:
a[idx] = idx;
break;
case 1:
a[idx] = idx * v;
break;
case 2:
a[idx] = idx + v;
break;
case 3:
a[idx] = idx - v;
break;
case 4:
case 5:
a[idx] = idx + 2*v;
break;
case 6:
case 7:
a[idx] = idx + v*v;
break;
default:
break;
}
}*/
int main()
{
return 0;
} | .file "tmpxft_00158627_00000000-6_cutest.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl main
.type main, @function
main:
.LFB2027:
.cfi_startproc
endbr64
movl $0, %eax
ret
.cfi_endproc
.LFE2027:
.size main, .-main
.globl _Z24__device_stub__Z5func1PiPi
.type _Z24__device_stub__Z5func1PiPi, @function
_Z24__device_stub__Z5func1PiPi:
.LFB2052:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L8
.L4:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L9
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L8:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z5func1Pi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L4
.L9:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2052:
.size _Z24__device_stub__Z5func1PiPi, .-_Z24__device_stub__Z5func1PiPi
.globl _Z5func1Pi
.type _Z5func1Pi, @function
_Z5func1Pi:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z24__device_stub__Z5func1PiPi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _Z5func1Pi, .-_Z5func1Pi
.globl _Z24__device_stub__Z5func2PiPi
.type _Z24__device_stub__Z5func2PiPi, @function
_Z24__device_stub__Z5func2PiPi:
.LFB2054:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L16
.L12:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L17
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L16:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z5func2Pi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L12
.L17:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2054:
.size _Z24__device_stub__Z5func2PiPi, .-_Z24__device_stub__Z5func2PiPi
.globl _Z5func2Pi
.type _Z5func2Pi, @function
_Z5func2Pi:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z24__device_stub__Z5func2PiPi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size _Z5func2Pi, .-_Z5func2Pi
.globl _Z24__device_stub__Z5func3PiPi
.type _Z24__device_stub__Z5func3PiPi, @function
_Z24__device_stub__Z5func3PiPi:
.LFB2056:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L24
.L20:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L25
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L24:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z5func3Pi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L20
.L25:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2056:
.size _Z24__device_stub__Z5func3PiPi, .-_Z24__device_stub__Z5func3PiPi
.globl _Z5func3Pi
.type _Z5func3Pi, @function
_Z5func3Pi:
.LFB2057:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z24__device_stub__Z5func3PiPi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size _Z5func3Pi, .-_Z5func3Pi
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z5func3Pi"
.LC1:
.string "_Z5func2Pi"
.LC2:
.string "_Z5func1Pi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2059:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z5func3Pi(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z5func2Pi(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z5func1Pi(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "cuda_runtime.h"
__global__ void func1(int* a)
{
int idx = threadIdx.x;
int v;
if (idx>32)
v = idx;
else
v = 32;
a[idx] = v;
}
__global__ void func2(int* a)
{
int idx = threadIdx.x;
int v = a[idx]*2;
if (idx>32)
v += 1;
a[idx] = v;
}
__global__ void func3(int* a)
{
int idx = threadIdx.x;
int v;
if (idx>32)
{
v = a[idx] + 1;
}
else{
v = a[idx+1024] * 2;
}
a[idx+2048] = v;
}
/*
__global__ void func(int4 c, float* a)
{
__shared__ float S[4096];
unsigned int tix = threadIdx.x;
unsigned int bix = blockIdx.x;
unsigned int wid = tix/warpSize;
S[tix] = a[tix];
__syncthreads();
if(wid==0)
{
float v0=0.14f;
float v1=1.14f;
float v2=2.14f;
float v3=3.14f;
float va = S[1024-tix];
#pragma unroll 256
for(int i=0; i<4096; i++)
{
v0 = fmaf(v0, va, v0);
v1 = fmaf(v1, va, v1);
v2 = fmaf(v2, va, v2);
v3 = fmaf(v3, va, v3);
}
if(bix==0 && tix==0) a[0] = v0+v1+v2+v3;
}
else{
}
}
__global__ void switchTest(int* a)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int v = idx %8;
switch(v){
case 0:
a[idx] = idx;
break;
case 1:
a[idx] = idx * v;
break;
case 2:
a[idx] = idx + v;
break;
case 3:
a[idx] = idx - v;
break;
case 4:
case 5:
a[idx] = idx + 2*v;
break;
case 6:
case 7:
a[idx] = idx + v*v;
break;
default:
break;
}
}*/
int main()
{
return 0;
} | #include "hip/hip_runtime.h"
__global__ void func1(int* a)
{
int idx = threadIdx.x;
int v;
if (idx>32)
v = idx;
else
v = 32;
a[idx] = v;
}
__global__ void func2(int* a)
{
int idx = threadIdx.x;
int v = a[idx]*2;
if (idx>32)
v += 1;
a[idx] = v;
}
__global__ void func3(int* a)
{
int idx = threadIdx.x;
int v;
if (idx>32)
{
v = a[idx] + 1;
}
else{
v = a[idx+1024] * 2;
}
a[idx+2048] = v;
}
/*
__global__ void func(int4 c, float* a)
{
__shared__ float S[4096];
unsigned int tix = threadIdx.x;
unsigned int bix = blockIdx.x;
unsigned int wid = tix/warpSize;
S[tix] = a[tix];
__syncthreads();
if(wid==0)
{
float v0=0.14f;
float v1=1.14f;
float v2=2.14f;
float v3=3.14f;
float va = S[1024-tix];
#pragma unroll 256
for(int i=0; i<4096; i++)
{
v0 = fmaf(v0, va, v0);
v1 = fmaf(v1, va, v1);
v2 = fmaf(v2, va, v2);
v3 = fmaf(v3, va, v3);
}
if(bix==0 && tix==0) a[0] = v0+v1+v2+v3;
}
else{
}
}
__global__ void switchTest(int* a)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int v = idx %8;
switch(v){
case 0:
a[idx] = idx;
break;
case 1:
a[idx] = idx * v;
break;
case 2:
a[idx] = idx + v;
break;
case 3:
a[idx] = idx - v;
break;
case 4:
case 5:
a[idx] = idx + 2*v;
break;
case 6:
case 7:
a[idx] = idx + v*v;
break;
default:
break;
}
}*/
int main()
{
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include "hip/hip_runtime.h"
__global__ void func1(int* a)
{
int idx = threadIdx.x;
int v;
if (idx>32)
v = idx;
else
v = 32;
a[idx] = v;
}
__global__ void func2(int* a)
{
int idx = threadIdx.x;
int v = a[idx]*2;
if (idx>32)
v += 1;
a[idx] = v;
}
__global__ void func3(int* a)
{
int idx = threadIdx.x;
int v;
if (idx>32)
{
v = a[idx] + 1;
}
else{
v = a[idx+1024] * 2;
}
a[idx+2048] = v;
}
/*
__global__ void func(int4 c, float* a)
{
__shared__ float S[4096];
unsigned int tix = threadIdx.x;
unsigned int bix = blockIdx.x;
unsigned int wid = tix/warpSize;
S[tix] = a[tix];
__syncthreads();
if(wid==0)
{
float v0=0.14f;
float v1=1.14f;
float v2=2.14f;
float v3=3.14f;
float va = S[1024-tix];
#pragma unroll 256
for(int i=0; i<4096; i++)
{
v0 = fmaf(v0, va, v0);
v1 = fmaf(v1, va, v1);
v2 = fmaf(v2, va, v2);
v3 = fmaf(v3, va, v3);
}
if(bix==0 && tix==0) a[0] = v0+v1+v2+v3;
}
else{
}
}
__global__ void switchTest(int* a)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int v = idx %8;
switch(v){
case 0:
a[idx] = idx;
break;
case 1:
a[idx] = idx * v;
break;
case 2:
a[idx] = idx + v;
break;
case 3:
a[idx] = idx - v;
break;
case 4:
case 5:
a[idx] = idx + 2*v;
break;
case 6:
case 7:
a[idx] = idx + v*v;
break;
default:
break;
}
}*/
int main()
{
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z5func1Pi
.globl _Z5func1Pi
.p2align 8
.type _Z5func1Pi,@function
_Z5func1Pi:
s_load_b64 s[0:1], s[0:1], 0x0
v_max_u32_e32 v1, 32, v0
v_lshlrev_b32_e32 v0, 2, v0
s_waitcnt lgkmcnt(0)
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z5func1Pi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 8
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 2
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z5func1Pi, .Lfunc_end0-_Z5func1Pi
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z5func2Pi
.globl _Z5func2Pi
.p2align 8
.type _Z5func2Pi,@function
_Z5func2Pi:
s_load_b64 s[0:1], s[0:1], 0x0
v_lshlrev_b32_e32 v1, 2, v0
v_cmp_lt_u32_e32 vcc_lo, 32, v0
v_cndmask_b32_e64 v0, 0, 1, vcc_lo
s_waitcnt lgkmcnt(0)
global_load_b32 v2, v1, s[0:1]
s_waitcnt vmcnt(0)
v_lshl_or_b32 v0, v2, 1, v0
global_store_b32 v1, v0, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z5func2Pi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 8
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 2
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z5func2Pi, .Lfunc_end1-_Z5func2Pi
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z5func3Pi
.globl _Z5func3Pi
.p2align 8
.type _Z5func3Pi,@function
_Z5func3Pi:
s_load_b64 s[0:1], s[0:1], 0x0
s_mov_b32 s2, exec_lo
v_cmpx_gt_u32_e32 33, v0
s_xor_b32 s2, exec_lo, s2
s_cbranch_execz .LBB2_2
v_lshl_or_b32 v1, v0, 2, 0x1000
s_waitcnt lgkmcnt(0)
global_load_b32 v1, v1, s[0:1]
s_waitcnt vmcnt(0)
v_lshlrev_b32_e32 v1, 1, v1
.LBB2_2:
s_and_not1_saveexec_b32 s2, s2
s_cbranch_execz .LBB2_4
v_lshlrev_b32_e32 v1, 2, v0
s_waitcnt lgkmcnt(0)
global_load_b32 v1, v1, s[0:1]
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, 1, v1
.LBB2_4:
s_or_b32 exec_lo, exec_lo, s2
v_lshl_or_b32 v0, v0, 2, 0x2000
s_waitcnt lgkmcnt(0)
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z5func3Pi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 8
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 3
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z5func3Pi, .Lfunc_end2-_Z5func3Pi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 8
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z5func1Pi
.private_segment_fixed_size: 0
.sgpr_count: 2
.sgpr_spill_count: 0
.symbol: _Z5func1Pi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 8
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z5func2Pi
.private_segment_fixed_size: 0
.sgpr_count: 4
.sgpr_spill_count: 0
.symbol: _Z5func2Pi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 8
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z5func3Pi
.private_segment_fixed_size: 0
.sgpr_count: 3
.sgpr_spill_count: 0
.symbol: _Z5func3Pi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include "hip/hip_runtime.h"
__global__ void func1(int* a)
{
int idx = threadIdx.x;
int v;
if (idx>32)
v = idx;
else
v = 32;
a[idx] = v;
}
__global__ void func2(int* a)
{
int idx = threadIdx.x;
int v = a[idx]*2;
if (idx>32)
v += 1;
a[idx] = v;
}
__global__ void func3(int* a)
{
int idx = threadIdx.x;
int v;
if (idx>32)
{
v = a[idx] + 1;
}
else{
v = a[idx+1024] * 2;
}
a[idx+2048] = v;
}
/*
__global__ void func(int4 c, float* a)
{
__shared__ float S[4096];
unsigned int tix = threadIdx.x;
unsigned int bix = blockIdx.x;
unsigned int wid = tix/warpSize;
S[tix] = a[tix];
__syncthreads();
if(wid==0)
{
float v0=0.14f;
float v1=1.14f;
float v2=2.14f;
float v3=3.14f;
float va = S[1024-tix];
#pragma unroll 256
for(int i=0; i<4096; i++)
{
v0 = fmaf(v0, va, v0);
v1 = fmaf(v1, va, v1);
v2 = fmaf(v2, va, v2);
v3 = fmaf(v3, va, v3);
}
if(bix==0 && tix==0) a[0] = v0+v1+v2+v3;
}
else{
}
}
__global__ void switchTest(int* a)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int v = idx %8;
switch(v){
case 0:
a[idx] = idx;
break;
case 1:
a[idx] = idx * v;
break;
case 2:
a[idx] = idx + v;
break;
case 3:
a[idx] = idx - v;
break;
case 4:
case 5:
a[idx] = idx + 2*v;
break;
case 6:
case 7:
a[idx] = idx + v*v;
break;
default:
break;
}
}*/
int main()
{
return 0;
} | .text
.file "cutest.hip"
.globl _Z20__device_stub__func1Pi # -- Begin function _Z20__device_stub__func1Pi
.p2align 4, 0x90
.type _Z20__device_stub__func1Pi,@function
_Z20__device_stub__func1Pi: # @_Z20__device_stub__func1Pi
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp)
leaq 64(%rsp), %rax
movq %rax, (%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movq %rsp, %r9
movl $_Z5func1Pi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end0:
.size _Z20__device_stub__func1Pi, .Lfunc_end0-_Z20__device_stub__func1Pi
.cfi_endproc
# -- End function
.globl _Z20__device_stub__func2Pi # -- Begin function _Z20__device_stub__func2Pi
.p2align 4, 0x90
.type _Z20__device_stub__func2Pi,@function
_Z20__device_stub__func2Pi: # @_Z20__device_stub__func2Pi
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp)
leaq 64(%rsp), %rax
movq %rax, (%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movq %rsp, %r9
movl $_Z5func2Pi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end1:
.size _Z20__device_stub__func2Pi, .Lfunc_end1-_Z20__device_stub__func2Pi
.cfi_endproc
# -- End function
.globl _Z20__device_stub__func3Pi # -- Begin function _Z20__device_stub__func3Pi
.p2align 4, 0x90
.type _Z20__device_stub__func3Pi,@function
_Z20__device_stub__func3Pi: # @_Z20__device_stub__func3Pi
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp)
leaq 64(%rsp), %rax
movq %rax, (%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movq %rsp, %r9
movl $_Z5func3Pi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end2:
.size _Z20__device_stub__func3Pi, .Lfunc_end2-_Z20__device_stub__func3Pi
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
xorl %eax, %eax
retq
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z5func1Pi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z5func2Pi, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z5func3Pi, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z5func1Pi,@object # @_Z5func1Pi
.section .rodata,"a",@progbits
.globl _Z5func1Pi
.p2align 3, 0x0
_Z5func1Pi:
.quad _Z20__device_stub__func1Pi
.size _Z5func1Pi, 8
.type _Z5func2Pi,@object # @_Z5func2Pi
.globl _Z5func2Pi
.p2align 3, 0x0
_Z5func2Pi:
.quad _Z20__device_stub__func2Pi
.size _Z5func2Pi, 8
.type _Z5func3Pi,@object # @_Z5func3Pi
.globl _Z5func3Pi
.p2align 3, 0x0
_Z5func3Pi:
.quad _Z20__device_stub__func3Pi
.size _Z5func3Pi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z5func1Pi"
.size .L__unnamed_1, 11
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z5func2Pi"
.size .L__unnamed_2, 11
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z5func3Pi"
.size .L__unnamed_3, 11
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z20__device_stub__func1Pi
.addrsig_sym _Z20__device_stub__func2Pi
.addrsig_sym _Z20__device_stub__func3Pi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z5func1Pi
.addrsig_sym _Z5func2Pi
.addrsig_sym _Z5func3Pi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z5func3Pi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R2, SR_TID.X ; /* 0x0000000000027919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0040*/ ISETP.GT.AND P0, PT, R2, 0x20, PT ; /* 0x000000200200780c */
/* 0x001fd00003f04270 */
/*0050*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fca00078e0203 */
/*0060*/ @!P0 LDG.E R5, [R2.64+0x1000] ; /* 0x0010000402058981 */
/* 0x000ea8000c1e1900 */
/*0070*/ @P0 LDG.E R0, [R2.64] ; /* 0x0000000402000981 */
/* 0x000ee2000c1e1900 */
/*0080*/ @!P0 IMAD.SHL.U32 R5, R5, 0x2, RZ ; /* 0x0000000205058824 */
/* 0x004fe200078e00ff */
/*0090*/ @P0 IADD3 R5, R0, 0x1, RZ ; /* 0x0000000100050810 */
/* 0x008fca0007ffe0ff */
/*00a0*/ STG.E [R2.64+0x2000], R5 ; /* 0x0020000502007986 */
/* 0x000fe2000c101904 */
/*00b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z5func2Pi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R4, SR_TID.X ; /* 0x0000000000047919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE R2, R4, R3, c[0x0][0x160] ; /* 0x0000580004027625 */
/* 0x001fca00078e0203 */
/*0050*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea2000c1e1900 */
/*0060*/ ISETP.GT.AND P0, PT, R4, 0x20, PT ; /* 0x000000200400780c */
/* 0x000fc80003f04270 */
/*0070*/ SEL R4, RZ, 0x1, !P0 ; /* 0x00000001ff047807 */
/* 0x000fe20004000000 */
/*0080*/ IMAD.SHL.U32 R5, R0, 0x2, RZ ; /* 0x0000000200057824 */
/* 0x004fca00078e00ff */
/*0090*/ LOP3.LUT R5, R5, 0xfffffffe, R4, 0xe2, !PT ; /* 0xfffffffe05057812 */
/* 0x000fca00078ee204 */
/*00a0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*00b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z5func1Pi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_TID.X ; /* 0x0000000000027919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0040*/ IMNMX R5, R2, 0x20, !PT ; /* 0x0000002002057817 */
/* 0x001fd00007800200 */
/*0050*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fca00078e0203 */
/*0060*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*0070*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0080*/ BRA 0x80; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z5func1Pi
.globl _Z5func1Pi
.p2align 8
.type _Z5func1Pi,@function
_Z5func1Pi:
s_load_b64 s[0:1], s[0:1], 0x0
v_max_u32_e32 v1, 32, v0
v_lshlrev_b32_e32 v0, 2, v0
s_waitcnt lgkmcnt(0)
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z5func1Pi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 8
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 2
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z5func1Pi, .Lfunc_end0-_Z5func1Pi
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z5func2Pi
.globl _Z5func2Pi
.p2align 8
.type _Z5func2Pi,@function
_Z5func2Pi:
s_load_b64 s[0:1], s[0:1], 0x0
v_lshlrev_b32_e32 v1, 2, v0
v_cmp_lt_u32_e32 vcc_lo, 32, v0
v_cndmask_b32_e64 v0, 0, 1, vcc_lo
s_waitcnt lgkmcnt(0)
global_load_b32 v2, v1, s[0:1]
s_waitcnt vmcnt(0)
v_lshl_or_b32 v0, v2, 1, v0
global_store_b32 v1, v0, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z5func2Pi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 8
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 2
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z5func2Pi, .Lfunc_end1-_Z5func2Pi
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z5func3Pi
.globl _Z5func3Pi
.p2align 8
.type _Z5func3Pi,@function
_Z5func3Pi:
s_load_b64 s[0:1], s[0:1], 0x0
s_mov_b32 s2, exec_lo
v_cmpx_gt_u32_e32 33, v0
s_xor_b32 s2, exec_lo, s2
s_cbranch_execz .LBB2_2
v_lshl_or_b32 v1, v0, 2, 0x1000
s_waitcnt lgkmcnt(0)
global_load_b32 v1, v1, s[0:1]
s_waitcnt vmcnt(0)
v_lshlrev_b32_e32 v1, 1, v1
.LBB2_2:
s_and_not1_saveexec_b32 s2, s2
s_cbranch_execz .LBB2_4
v_lshlrev_b32_e32 v1, 2, v0
s_waitcnt lgkmcnt(0)
global_load_b32 v1, v1, s[0:1]
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, 1, v1
.LBB2_4:
s_or_b32 exec_lo, exec_lo, s2
v_lshl_or_b32 v0, v0, 2, 0x2000
s_waitcnt lgkmcnt(0)
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z5func3Pi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 8
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 3
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z5func3Pi, .Lfunc_end2-_Z5func3Pi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 8
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z5func1Pi
.private_segment_fixed_size: 0
.sgpr_count: 2
.sgpr_spill_count: 0
.symbol: _Z5func1Pi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 8
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z5func2Pi
.private_segment_fixed_size: 0
.sgpr_count: 4
.sgpr_spill_count: 0
.symbol: _Z5func2Pi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 8
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z5func3Pi
.private_segment_fixed_size: 0
.sgpr_count: 3
.sgpr_spill_count: 0
.symbol: _Z5func3Pi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00158627_00000000-6_cutest.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl main
.type main, @function
main:
.LFB2027:
.cfi_startproc
endbr64
movl $0, %eax
ret
.cfi_endproc
.LFE2027:
.size main, .-main
.globl _Z24__device_stub__Z5func1PiPi
.type _Z24__device_stub__Z5func1PiPi, @function
_Z24__device_stub__Z5func1PiPi:
.LFB2052:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L8
.L4:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L9
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L8:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z5func1Pi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L4
.L9:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2052:
.size _Z24__device_stub__Z5func1PiPi, .-_Z24__device_stub__Z5func1PiPi
.globl _Z5func1Pi
.type _Z5func1Pi, @function
_Z5func1Pi:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z24__device_stub__Z5func1PiPi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _Z5func1Pi, .-_Z5func1Pi
.globl _Z24__device_stub__Z5func2PiPi
.type _Z24__device_stub__Z5func2PiPi, @function
_Z24__device_stub__Z5func2PiPi:
.LFB2054:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L16
.L12:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L17
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L16:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z5func2Pi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L12
.L17:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2054:
.size _Z24__device_stub__Z5func2PiPi, .-_Z24__device_stub__Z5func2PiPi
.globl _Z5func2Pi
.type _Z5func2Pi, @function
_Z5func2Pi:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z24__device_stub__Z5func2PiPi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size _Z5func2Pi, .-_Z5func2Pi
.globl _Z24__device_stub__Z5func3PiPi
.type _Z24__device_stub__Z5func3PiPi, @function
_Z24__device_stub__Z5func3PiPi:
.LFB2056:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L24
.L20:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L25
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L24:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z5func3Pi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L20
.L25:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2056:
.size _Z24__device_stub__Z5func3PiPi, .-_Z24__device_stub__Z5func3PiPi
.globl _Z5func3Pi
.type _Z5func3Pi, @function
_Z5func3Pi:
.LFB2057:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z24__device_stub__Z5func3PiPi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size _Z5func3Pi, .-_Z5func3Pi
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z5func3Pi"
.LC1:
.string "_Z5func2Pi"
.LC2:
.string "_Z5func1Pi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2059:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z5func3Pi(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z5func2Pi(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z5func1Pi(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "cutest.hip"
.globl _Z20__device_stub__func1Pi # -- Begin function _Z20__device_stub__func1Pi
.p2align 4, 0x90
.type _Z20__device_stub__func1Pi,@function
_Z20__device_stub__func1Pi: # @_Z20__device_stub__func1Pi
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp)
leaq 64(%rsp), %rax
movq %rax, (%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movq %rsp, %r9
movl $_Z5func1Pi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end0:
.size _Z20__device_stub__func1Pi, .Lfunc_end0-_Z20__device_stub__func1Pi
.cfi_endproc
# -- End function
.globl _Z20__device_stub__func2Pi # -- Begin function _Z20__device_stub__func2Pi
.p2align 4, 0x90
.type _Z20__device_stub__func2Pi,@function
_Z20__device_stub__func2Pi: # @_Z20__device_stub__func2Pi
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp)
leaq 64(%rsp), %rax
movq %rax, (%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movq %rsp, %r9
movl $_Z5func2Pi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end1:
.size _Z20__device_stub__func2Pi, .Lfunc_end1-_Z20__device_stub__func2Pi
.cfi_endproc
# -- End function
.globl _Z20__device_stub__func3Pi # -- Begin function _Z20__device_stub__func3Pi
.p2align 4, 0x90
.type _Z20__device_stub__func3Pi,@function
_Z20__device_stub__func3Pi: # @_Z20__device_stub__func3Pi
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp)
leaq 64(%rsp), %rax
movq %rax, (%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movq %rsp, %r9
movl $_Z5func3Pi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end2:
.size _Z20__device_stub__func3Pi, .Lfunc_end2-_Z20__device_stub__func3Pi
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
xorl %eax, %eax
retq
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z5func1Pi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z5func2Pi, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z5func3Pi, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z5func1Pi,@object # @_Z5func1Pi
.section .rodata,"a",@progbits
.globl _Z5func1Pi
.p2align 3, 0x0
_Z5func1Pi:
.quad _Z20__device_stub__func1Pi
.size _Z5func1Pi, 8
.type _Z5func2Pi,@object # @_Z5func2Pi
.globl _Z5func2Pi
.p2align 3, 0x0
_Z5func2Pi:
.quad _Z20__device_stub__func2Pi
.size _Z5func2Pi, 8
.type _Z5func3Pi,@object # @_Z5func3Pi
.globl _Z5func3Pi
.p2align 3, 0x0
_Z5func3Pi:
.quad _Z20__device_stub__func3Pi
.size _Z5func3Pi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z5func1Pi"
.size .L__unnamed_1, 11
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z5func2Pi"
.size .L__unnamed_2, 11
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z5func3Pi"
.size .L__unnamed_3, 11
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z20__device_stub__func1Pi
.addrsig_sym _Z20__device_stub__func2Pi
.addrsig_sym _Z20__device_stub__func3Pi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z5func1Pi
.addrsig_sym _Z5func2Pi
.addrsig_sym _Z5func3Pi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void convolution_kernel_naive(float *output, float *input, float *filter) {
} | code for sm_80
Function : _Z24convolution_kernel_naivePfS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void convolution_kernel_naive(float *output, float *input, float *filter) {
} | .file "tmpxft_000543d5_00000000-6_convolution_kernel_naive.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z48__device_stub__Z24convolution_kernel_naivePfS_S_PfS_S_
.type _Z48__device_stub__Z24convolution_kernel_naivePfS_S_PfS_S_, @function
_Z48__device_stub__Z24convolution_kernel_naivePfS_S_PfS_S_:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z24convolution_kernel_naivePfS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z48__device_stub__Z24convolution_kernel_naivePfS_S_PfS_S_, .-_Z48__device_stub__Z24convolution_kernel_naivePfS_S_PfS_S_
.globl _Z24convolution_kernel_naivePfS_S_
.type _Z24convolution_kernel_naivePfS_S_, @function
_Z24convolution_kernel_naivePfS_S_:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z48__device_stub__Z24convolution_kernel_naivePfS_S_PfS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z24convolution_kernel_naivePfS_S_, .-_Z24convolution_kernel_naivePfS_S_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z24convolution_kernel_naivePfS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z24convolution_kernel_naivePfS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void convolution_kernel_naive(float *output, float *input, float *filter) {
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void convolution_kernel_naive(float *output, float *input, float *filter) {
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void convolution_kernel_naive(float *output, float *input, float *filter) {
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z24convolution_kernel_naivePfS_S_
.globl _Z24convolution_kernel_naivePfS_S_
.p2align 8
.type _Z24convolution_kernel_naivePfS_S_,@function
_Z24convolution_kernel_naivePfS_S_:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z24convolution_kernel_naivePfS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z24convolution_kernel_naivePfS_S_, .Lfunc_end0-_Z24convolution_kernel_naivePfS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z24convolution_kernel_naivePfS_S_
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z24convolution_kernel_naivePfS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void convolution_kernel_naive(float *output, float *input, float *filter) {
} | .text
.file "convolution_kernel_naive.hip"
.globl _Z39__device_stub__convolution_kernel_naivePfS_S_ # -- Begin function _Z39__device_stub__convolution_kernel_naivePfS_S_
.p2align 4, 0x90
.type _Z39__device_stub__convolution_kernel_naivePfS_S_,@function
_Z39__device_stub__convolution_kernel_naivePfS_S_: # @_Z39__device_stub__convolution_kernel_naivePfS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z24convolution_kernel_naivePfS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z39__device_stub__convolution_kernel_naivePfS_S_, .Lfunc_end0-_Z39__device_stub__convolution_kernel_naivePfS_S_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z24convolution_kernel_naivePfS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z24convolution_kernel_naivePfS_S_,@object # @_Z24convolution_kernel_naivePfS_S_
.section .rodata,"a",@progbits
.globl _Z24convolution_kernel_naivePfS_S_
.p2align 3, 0x0
_Z24convolution_kernel_naivePfS_S_:
.quad _Z39__device_stub__convolution_kernel_naivePfS_S_
.size _Z24convolution_kernel_naivePfS_S_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z24convolution_kernel_naivePfS_S_"
.size .L__unnamed_1, 35
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z39__device_stub__convolution_kernel_naivePfS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z24convolution_kernel_naivePfS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z24convolution_kernel_naivePfS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z24convolution_kernel_naivePfS_S_
.globl _Z24convolution_kernel_naivePfS_S_
.p2align 8
.type _Z24convolution_kernel_naivePfS_S_,@function
_Z24convolution_kernel_naivePfS_S_:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z24convolution_kernel_naivePfS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z24convolution_kernel_naivePfS_S_, .Lfunc_end0-_Z24convolution_kernel_naivePfS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z24convolution_kernel_naivePfS_S_
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z24convolution_kernel_naivePfS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000543d5_00000000-6_convolution_kernel_naive.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z48__device_stub__Z24convolution_kernel_naivePfS_S_PfS_S_
.type _Z48__device_stub__Z24convolution_kernel_naivePfS_S_PfS_S_, @function
_Z48__device_stub__Z24convolution_kernel_naivePfS_S_PfS_S_:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z24convolution_kernel_naivePfS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z48__device_stub__Z24convolution_kernel_naivePfS_S_PfS_S_, .-_Z48__device_stub__Z24convolution_kernel_naivePfS_S_PfS_S_
.globl _Z24convolution_kernel_naivePfS_S_
.type _Z24convolution_kernel_naivePfS_S_, @function
_Z24convolution_kernel_naivePfS_S_:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z48__device_stub__Z24convolution_kernel_naivePfS_S_PfS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z24convolution_kernel_naivePfS_S_, .-_Z24convolution_kernel_naivePfS_S_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z24convolution_kernel_naivePfS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z24convolution_kernel_naivePfS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "convolution_kernel_naive.hip"
.globl _Z39__device_stub__convolution_kernel_naivePfS_S_ # -- Begin function _Z39__device_stub__convolution_kernel_naivePfS_S_
.p2align 4, 0x90
.type _Z39__device_stub__convolution_kernel_naivePfS_S_,@function
_Z39__device_stub__convolution_kernel_naivePfS_S_: # @_Z39__device_stub__convolution_kernel_naivePfS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z24convolution_kernel_naivePfS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z39__device_stub__convolution_kernel_naivePfS_S_, .Lfunc_end0-_Z39__device_stub__convolution_kernel_naivePfS_S_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z24convolution_kernel_naivePfS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z24convolution_kernel_naivePfS_S_,@object # @_Z24convolution_kernel_naivePfS_S_
.section .rodata,"a",@progbits
.globl _Z24convolution_kernel_naivePfS_S_
.p2align 3, 0x0
_Z24convolution_kernel_naivePfS_S_:
.quad _Z39__device_stub__convolution_kernel_naivePfS_S_
.size _Z24convolution_kernel_naivePfS_S_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z24convolution_kernel_naivePfS_S_"
.size .L__unnamed_1, 35
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z39__device_stub__convolution_kernel_naivePfS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z24convolution_kernel_naivePfS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | // addNext.cu
// Second program from Dr Dobbs tutorial. http://drdobbs.com/parallel/207402986
#include <cuda.h>
#include <stdio.h>
#include <assert.h>
// Host kernel = increment each element by 1
void incOnHost(float *a, int N) {
int i;
for (i=1; i<N; i++) {
a[i] = a[i] + a[i-1];
}
}
// Device Kernel = increment each element by 1
__global__ void incOnDevice(float *a, int N) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if ( (idx<N) && (idx>1) )
a[idx] = a[idx] + a[idx-1];
}
// Main thread
int main(void) {
float *a_h, *b_h;
float *a_d;
int i, N=48;
size_t size = N*sizeof(float);
a_h = (float *)malloc(size);
b_h = (float *)malloc(size);
cudaMalloc((void **) &a_d, size);
for (i=0; i<N; i++)
a_h[i] = (float)i;
cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);
// do calculation on host
incOnHost(a_h, N);
// do calculation on device
int blockSize = 4;
int nBlocks = N/blockSize + (N%blockSize == 0?0:1);
incOnDevice <<< nBlocks, blockSize >>> (a_d, N);
// Retrieve results and compare
cudaMemcpy(b_h,a_d,size,cudaMemcpyDeviceToHost);
for (i=0; i<N; i++) {
// printf("value from host %d = %1.1f\n",i,a_h[i]);
printf("value from device %d = %1.1f\n",i,b_h[i]);
// assert(a_h[i] == b_h[i]);
}
free(a_h); free(b_h); cudaFree(a_d);
} | code for sm_80
Function : _Z11incOnDevicePfi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x168], PT ; /* 0x00005a0002007a0c */
/* 0x000fc80003f06270 */
/*0050*/ ISETP.LT.OR P0, PT, R2, 0x2, P0 ; /* 0x000000020200780c */
/* 0x000fda0000701670 */
/*0060*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0070*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0080*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0090*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fca00078e0203 */
/*00a0*/ LDG.E R0, [R2.64+-0x4] ; /* 0xfffffc0402007981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R5, [R2.64] ; /* 0x0000000402057981 */
/* 0x000ea4000c1e1900 */
/*00c0*/ FADD R5, R0, R5 ; /* 0x0000000500057221 */
/* 0x004fca0000000000 */
/*00d0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*00e0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00f0*/ BRA 0xf0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | // addNext.cu
// Second program from Dr Dobbs tutorial. http://drdobbs.com/parallel/207402986
#include <cuda.h>
#include <stdio.h>
#include <assert.h>
// Host kernel = increment each element by 1
void incOnHost(float *a, int N) {
int i;
for (i=1; i<N; i++) {
a[i] = a[i] + a[i-1];
}
}
// Device Kernel = increment each element by 1
__global__ void incOnDevice(float *a, int N) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if ( (idx<N) && (idx>1) )
a[idx] = a[idx] + a[idx-1];
}
// Main thread
int main(void) {
float *a_h, *b_h;
float *a_d;
int i, N=48;
size_t size = N*sizeof(float);
a_h = (float *)malloc(size);
b_h = (float *)malloc(size);
cudaMalloc((void **) &a_d, size);
for (i=0; i<N; i++)
a_h[i] = (float)i;
cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);
// do calculation on host
incOnHost(a_h, N);
// do calculation on device
int blockSize = 4;
int nBlocks = N/blockSize + (N%blockSize == 0?0:1);
incOnDevice <<< nBlocks, blockSize >>> (a_d, N);
// Retrieve results and compare
cudaMemcpy(b_h,a_d,size,cudaMemcpyDeviceToHost);
for (i=0; i<N; i++) {
// printf("value from host %d = %1.1f\n",i,a_h[i]);
printf("value from device %d = %1.1f\n",i,b_h[i]);
// assert(a_h[i] == b_h[i]);
}
free(a_h); free(b_h); cudaFree(a_d);
} | .file "tmpxft_0007b5e3_00000000-6_addNext.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z9incOnHostPfi
.type _Z9incOnHostPfi, @function
_Z9incOnHostPfi:
.LFB2057:
.cfi_startproc
endbr64
cmpl $1, %esi
jle .L3
leaq 4(%rdi), %rax
leal -2(%rsi), %edx
leaq 8(%rdi,%rdx,4), %rdx
.L5:
movss (%rax), %xmm0
addss -4(%rax), %xmm0
movss %xmm0, (%rax)
addq $4, %rax
cmpq %rdx, %rax
jne .L5
.L3:
ret
.cfi_endproc
.LFE2057:
.size _Z9incOnHostPfi, .-_Z9incOnHostPfi
.globl _Z32__device_stub__Z11incOnDevicePfiPfi
.type _Z32__device_stub__Z11incOnDevicePfiPfi, @function
_Z32__device_stub__Z11incOnDevicePfiPfi:
.LFB2083:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L11
.L7:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L12
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z11incOnDevicePfi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L7
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z32__device_stub__Z11incOnDevicePfiPfi, .-_Z32__device_stub__Z11incOnDevicePfiPfi
.globl _Z11incOnDevicePfi
.type _Z11incOnDevicePfi, @function
_Z11incOnDevicePfi:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z11incOnDevicePfiPfi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z11incOnDevicePfi, .-_Z11incOnDevicePfi
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "value from device %d = %1.1f\n"
.text
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $56, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movl $192, %edi
call malloc@PLT
movq %rax, %rbp
movl $192, %edi
call malloc@PLT
movq %rax, %r12
leaq 8(%rsp), %rdi
movl $192, %esi
call cudaMalloc@PLT
movl $0, %eax
.L16:
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, 0(%rbp,%rax,4)
addq $1, %rax
cmpq $48, %rax
jne .L16
movl $1, %ecx
movl $192, %edx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $48, %esi
movq %rbp, %rdi
call _Z9incOnHostPfi
movl $4, 28(%rsp)
movl $1, 32(%rsp)
movl $12, 16(%rsp)
movl $1, 20(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L23
.L17:
movl $2, %ecx
movl $192, %edx
movq 8(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movl $0, %ebx
leaq .LC0(%rip), %r13
.L18:
pxor %xmm0, %xmm0
cvtss2sd (%r12,%rbx,4), %xmm0
movl %ebx, %edx
movq %r13, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $1, %rbx
cmpq $48, %rbx
jne .L18
movq %rbp, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L24
movl $0, %eax
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
movl $48, %esi
movq 8(%rsp), %rdi
call _Z32__device_stub__Z11incOnDevicePfiPfi
jmp .L17
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size main, .-main
.section .rodata.str1.1
.LC1:
.string "_Z11incOnDevicePfi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z11incOnDevicePfi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | // addNext.cu
// Second program from Dr Dobbs tutorial. http://drdobbs.com/parallel/207402986
#include <cuda.h>
#include <stdio.h>
#include <assert.h>
// Host kernel = increment each element by 1
void incOnHost(float *a, int N) {
int i;
for (i=1; i<N; i++) {
a[i] = a[i] + a[i-1];
}
}
// Device Kernel = increment each element by 1
__global__ void incOnDevice(float *a, int N) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if ( (idx<N) && (idx>1) )
a[idx] = a[idx] + a[idx-1];
}
// Main thread
int main(void) {
float *a_h, *b_h;
float *a_d;
int i, N=48;
size_t size = N*sizeof(float);
a_h = (float *)malloc(size);
b_h = (float *)malloc(size);
cudaMalloc((void **) &a_d, size);
for (i=0; i<N; i++)
a_h[i] = (float)i;
cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);
// do calculation on host
incOnHost(a_h, N);
// do calculation on device
int blockSize = 4;
int nBlocks = N/blockSize + (N%blockSize == 0?0:1);
incOnDevice <<< nBlocks, blockSize >>> (a_d, N);
// Retrieve results and compare
cudaMemcpy(b_h,a_d,size,cudaMemcpyDeviceToHost);
for (i=0; i<N; i++) {
// printf("value from host %d = %1.1f\n",i,a_h[i]);
printf("value from device %d = %1.1f\n",i,b_h[i]);
// assert(a_h[i] == b_h[i]);
}
free(a_h); free(b_h); cudaFree(a_d);
} | // addNext.cu
// Second program from Dr Dobbs tutorial. http://drdobbs.com/parallel/207402986
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <assert.h>
// Host kernel = increment each element by 1
void incOnHost(float *a, int N) {
int i;
for (i=1; i<N; i++) {
a[i] = a[i] + a[i-1];
}
}
// Device Kernel = increment each element by 1
__global__ void incOnDevice(float *a, int N) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if ( (idx<N) && (idx>1) )
a[idx] = a[idx] + a[idx-1];
}
// Main thread
int main(void) {
float *a_h, *b_h;
float *a_d;
int i, N=48;
size_t size = N*sizeof(float);
a_h = (float *)malloc(size);
b_h = (float *)malloc(size);
hipMalloc((void **) &a_d, size);
for (i=0; i<N; i++)
a_h[i] = (float)i;
hipMemcpy(a_d, a_h, size, hipMemcpyHostToDevice);
// do calculation on host
incOnHost(a_h, N);
// do calculation on device
int blockSize = 4;
int nBlocks = N/blockSize + (N%blockSize == 0?0:1);
incOnDevice <<< nBlocks, blockSize >>> (a_d, N);
// Retrieve results and compare
hipMemcpy(b_h,a_d,size,hipMemcpyDeviceToHost);
for (i=0; i<N; i++) {
// printf("value from host %d = %1.1f\n",i,a_h[i]);
printf("value from device %d = %1.1f\n",i,b_h[i]);
// assert(a_h[i] == b_h[i]);
}
free(a_h); free(b_h); hipFree(a_d);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | // addNext.cu
// Second program from Dr Dobbs tutorial. http://drdobbs.com/parallel/207402986
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <assert.h>
// Host kernel = increment each element by 1
void incOnHost(float *a, int N) {
int i;
for (i=1; i<N; i++) {
a[i] = a[i] + a[i-1];
}
}
// Device Kernel = increment each element by 1
__global__ void incOnDevice(float *a, int N) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if ( (idx<N) && (idx>1) )
a[idx] = a[idx] + a[idx-1];
}
// Main thread
int main(void) {
float *a_h, *b_h;
float *a_d;
int i, N=48;
size_t size = N*sizeof(float);
a_h = (float *)malloc(size);
b_h = (float *)malloc(size);
hipMalloc((void **) &a_d, size);
for (i=0; i<N; i++)
a_h[i] = (float)i;
hipMemcpy(a_d, a_h, size, hipMemcpyHostToDevice);
// do calculation on host
incOnHost(a_h, N);
// do calculation on device
int blockSize = 4;
int nBlocks = N/blockSize + (N%blockSize == 0?0:1);
incOnDevice <<< nBlocks, blockSize >>> (a_d, N);
// Retrieve results and compare
hipMemcpy(b_h,a_d,size,hipMemcpyDeviceToHost);
for (i=0; i<N; i++) {
// printf("value from host %d = %1.1f\n",i,a_h[i]);
printf("value from device %d = %1.1f\n",i,b_h[i]);
// assert(a_h[i] == b_h[i]);
}
free(a_h); free(b_h); hipFree(a_d);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z11incOnDevicePfi
.globl _Z11incOnDevicePfi
.p2align 8
.type _Z11incOnDevicePfi,@function
_Z11incOnDevicePfi:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x1c
s_load_b32 s3, s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
v_cmp_gt_i32_e32 vcc_lo, s3, v1
v_cmp_lt_i32_e64 s2, 1, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, vcc_lo, s2
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_2
s_load_b64 s[0:1], s[0:1], 0x0
v_mov_b32_e32 v2, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[3:4], 2, v[1:2]
v_ashrrev_i32_e32 v2, 31, v1
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v2, vcc_lo, s0, v3
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v4, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_clause 0x1
global_load_b32 v4, v[2:3], off
global_load_b32 v0, v[0:1], off offset:-4
s_waitcnt vmcnt(0)
v_add_f32_e32 v0, v4, v0
global_store_b32 v[2:3], v0, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z11incOnDevicePfi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z11incOnDevicePfi, .Lfunc_end0-_Z11incOnDevicePfi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z11incOnDevicePfi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z11incOnDevicePfi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | // addNext.cu
// Second program from Dr Dobbs tutorial. http://drdobbs.com/parallel/207402986
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <assert.h>
// Host kernel = increment each element by 1
void incOnHost(float *a, int N) {
int i;
for (i=1; i<N; i++) {
a[i] = a[i] + a[i-1];
}
}
// Device Kernel = increment each element by 1
__global__ void incOnDevice(float *a, int N) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if ( (idx<N) && (idx>1) )
a[idx] = a[idx] + a[idx-1];
}
// Main thread
int main(void) {
float *a_h, *b_h;
float *a_d;
int i, N=48;
size_t size = N*sizeof(float);
a_h = (float *)malloc(size);
b_h = (float *)malloc(size);
hipMalloc((void **) &a_d, size);
for (i=0; i<N; i++)
a_h[i] = (float)i;
hipMemcpy(a_d, a_h, size, hipMemcpyHostToDevice);
// do calculation on host
incOnHost(a_h, N);
// do calculation on device
int blockSize = 4;
int nBlocks = N/blockSize + (N%blockSize == 0?0:1);
incOnDevice <<< nBlocks, blockSize >>> (a_d, N);
// Retrieve results and compare
hipMemcpy(b_h,a_d,size,hipMemcpyDeviceToHost);
for (i=0; i<N; i++) {
// printf("value from host %d = %1.1f\n",i,a_h[i]);
printf("value from device %d = %1.1f\n",i,b_h[i]);
// assert(a_h[i] == b_h[i]);
}
free(a_h); free(b_h); hipFree(a_d);
} | .text
.file "addNext.hip"
.globl _Z9incOnHostPfi # -- Begin function _Z9incOnHostPfi
.p2align 4, 0x90
.type _Z9incOnHostPfi,@function
_Z9incOnHostPfi: # @_Z9incOnHostPfi
.cfi_startproc
# %bb.0:
cmpl $2, %esi
jl .LBB0_3
# %bb.1: # %.lr.ph.preheader
movl %esi, %eax
movss (%rdi), %xmm0 # xmm0 = mem[0],zero,zero,zero
movl $1, %ecx
.p2align 4, 0x90
.LBB0_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
addss (%rdi,%rcx,4), %xmm0
movss %xmm0, (%rdi,%rcx,4)
incq %rcx
cmpq %rcx, %rax
jne .LBB0_2
.LBB0_3: # %._crit_edge
retq
.Lfunc_end0:
.size _Z9incOnHostPfi, .Lfunc_end0-_Z9incOnHostPfi
.cfi_endproc
# -- End function
.globl _Z26__device_stub__incOnDevicePfi # -- Begin function _Z26__device_stub__incOnDevicePfi
.p2align 4, 0x90
.type _Z26__device_stub__incOnDevicePfi,@function
_Z26__device_stub__incOnDevicePfi: # @_Z26__device_stub__incOnDevicePfi
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z11incOnDevicePfi, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end1:
.size _Z26__device_stub__incOnDevicePfi, .Lfunc_end1-_Z26__device_stub__incOnDevicePfi
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $96, %rsp
.cfi_def_cfa_offset 128
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $192, %edi
callq malloc
movq %rax, %rbx
movl $192, %edi
callq malloc
movq %rax, %r14
leaq 8(%rsp), %rdi
movl $192, %esi
callq hipMalloc
xorl %eax, %eax
.p2align 4, 0x90
.LBB2_1: # =>This Inner Loop Header: Depth=1
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%rbx,%rax,4)
incq %rax
cmpq $48, %rax
jne .LBB2_1
# %bb.2:
movq 8(%rsp), %rdi
movl $192, %edx
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movss (%rbx), %xmm0 # xmm0 = mem[0],zero,zero,zero
movl $1, %eax
.p2align 4, 0x90
.LBB2_3: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
addss (%rbx,%rax,4), %xmm0
movss %xmm0, (%rbx,%rax,4)
incq %rax
cmpq $48, %rax
jne .LBB2_3
# %bb.4: # %_Z9incOnHostPfi.exit
movabsq $4294967300, %rdx # imm = 0x100000004
leaq 8(%rdx), %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_6
# %bb.5:
movq 8(%rsp), %rax
movq %rax, 72(%rsp)
movl $48, 20(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 20(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z11incOnDevicePfi, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_6:
movq 8(%rsp), %rsi
movl $192, %edx
movq %r14, %rdi
movl $2, %ecx
callq hipMemcpy
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB2_7: # =>This Inner Loop Header: Depth=1
movss (%r14,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %edi
movl %r15d, %esi
movb $1, %al
callq printf
incq %r15
cmpq $48, %r15
jne .LBB2_7
# %bb.8:
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq 8(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $96, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11incOnDevicePfi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z11incOnDevicePfi,@object # @_Z11incOnDevicePfi
.section .rodata,"a",@progbits
.globl _Z11incOnDevicePfi
.p2align 3, 0x0
_Z11incOnDevicePfi:
.quad _Z26__device_stub__incOnDevicePfi
.size _Z11incOnDevicePfi, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "value from device %d = %1.1f\n"
.size .L.str, 30
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z11incOnDevicePfi"
.size .L__unnamed_1, 19
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z26__device_stub__incOnDevicePfi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z11incOnDevicePfi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z11incOnDevicePfi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x168], PT ; /* 0x00005a0002007a0c */
/* 0x000fc80003f06270 */
/*0050*/ ISETP.LT.OR P0, PT, R2, 0x2, P0 ; /* 0x000000020200780c */
/* 0x000fda0000701670 */
/*0060*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0070*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0080*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0090*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fca00078e0203 */
/*00a0*/ LDG.E R0, [R2.64+-0x4] ; /* 0xfffffc0402007981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R5, [R2.64] ; /* 0x0000000402057981 */
/* 0x000ea4000c1e1900 */
/*00c0*/ FADD R5, R0, R5 ; /* 0x0000000500057221 */
/* 0x004fca0000000000 */
/*00d0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*00e0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00f0*/ BRA 0xf0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z11incOnDevicePfi
.globl _Z11incOnDevicePfi
.p2align 8
.type _Z11incOnDevicePfi,@function
_Z11incOnDevicePfi:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x1c
s_load_b32 s3, s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
v_cmp_gt_i32_e32 vcc_lo, s3, v1
v_cmp_lt_i32_e64 s2, 1, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, vcc_lo, s2
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_2
s_load_b64 s[0:1], s[0:1], 0x0
v_mov_b32_e32 v2, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[3:4], 2, v[1:2]
v_ashrrev_i32_e32 v2, 31, v1
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v2, vcc_lo, s0, v3
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v4, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_clause 0x1
global_load_b32 v4, v[2:3], off
global_load_b32 v0, v[0:1], off offset:-4
s_waitcnt vmcnt(0)
v_add_f32_e32 v0, v4, v0
global_store_b32 v[2:3], v0, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z11incOnDevicePfi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z11incOnDevicePfi, .Lfunc_end0-_Z11incOnDevicePfi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z11incOnDevicePfi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z11incOnDevicePfi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0007b5e3_00000000-6_addNext.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z9incOnHostPfi
.type _Z9incOnHostPfi, @function
_Z9incOnHostPfi:
.LFB2057:
.cfi_startproc
endbr64
cmpl $1, %esi
jle .L3
leaq 4(%rdi), %rax
leal -2(%rsi), %edx
leaq 8(%rdi,%rdx,4), %rdx
.L5:
movss (%rax), %xmm0
addss -4(%rax), %xmm0
movss %xmm0, (%rax)
addq $4, %rax
cmpq %rdx, %rax
jne .L5
.L3:
ret
.cfi_endproc
.LFE2057:
.size _Z9incOnHostPfi, .-_Z9incOnHostPfi
.globl _Z32__device_stub__Z11incOnDevicePfiPfi
.type _Z32__device_stub__Z11incOnDevicePfiPfi, @function
_Z32__device_stub__Z11incOnDevicePfiPfi:
.LFB2083:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L11
.L7:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L12
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z11incOnDevicePfi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L7
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z32__device_stub__Z11incOnDevicePfiPfi, .-_Z32__device_stub__Z11incOnDevicePfiPfi
.globl _Z11incOnDevicePfi
.type _Z11incOnDevicePfi, @function
_Z11incOnDevicePfi:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z11incOnDevicePfiPfi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z11incOnDevicePfi, .-_Z11incOnDevicePfi
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "value from device %d = %1.1f\n"
.text
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $56, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movl $192, %edi
call malloc@PLT
movq %rax, %rbp
movl $192, %edi
call malloc@PLT
movq %rax, %r12
leaq 8(%rsp), %rdi
movl $192, %esi
call cudaMalloc@PLT
movl $0, %eax
.L16:
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, 0(%rbp,%rax,4)
addq $1, %rax
cmpq $48, %rax
jne .L16
movl $1, %ecx
movl $192, %edx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $48, %esi
movq %rbp, %rdi
call _Z9incOnHostPfi
movl $4, 28(%rsp)
movl $1, 32(%rsp)
movl $12, 16(%rsp)
movl $1, 20(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L23
.L17:
movl $2, %ecx
movl $192, %edx
movq 8(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movl $0, %ebx
leaq .LC0(%rip), %r13
.L18:
pxor %xmm0, %xmm0
cvtss2sd (%r12,%rbx,4), %xmm0
movl %ebx, %edx
movq %r13, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $1, %rbx
cmpq $48, %rbx
jne .L18
movq %rbp, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L24
movl $0, %eax
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
movl $48, %esi
movq 8(%rsp), %rdi
call _Z32__device_stub__Z11incOnDevicePfiPfi
jmp .L17
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size main, .-main
.section .rodata.str1.1
.LC1:
.string "_Z11incOnDevicePfi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z11incOnDevicePfi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "addNext.hip"
.globl _Z9incOnHostPfi # -- Begin function _Z9incOnHostPfi
.p2align 4, 0x90
.type _Z9incOnHostPfi,@function
_Z9incOnHostPfi: # @_Z9incOnHostPfi
.cfi_startproc
# %bb.0:
cmpl $2, %esi
jl .LBB0_3
# %bb.1: # %.lr.ph.preheader
movl %esi, %eax
movss (%rdi), %xmm0 # xmm0 = mem[0],zero,zero,zero
movl $1, %ecx
.p2align 4, 0x90
.LBB0_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
addss (%rdi,%rcx,4), %xmm0
movss %xmm0, (%rdi,%rcx,4)
incq %rcx
cmpq %rcx, %rax
jne .LBB0_2
.LBB0_3: # %._crit_edge
retq
.Lfunc_end0:
.size _Z9incOnHostPfi, .Lfunc_end0-_Z9incOnHostPfi
.cfi_endproc
# -- End function
.globl _Z26__device_stub__incOnDevicePfi # -- Begin function _Z26__device_stub__incOnDevicePfi
.p2align 4, 0x90
.type _Z26__device_stub__incOnDevicePfi,@function
_Z26__device_stub__incOnDevicePfi: # @_Z26__device_stub__incOnDevicePfi
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z11incOnDevicePfi, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end1:
.size _Z26__device_stub__incOnDevicePfi, .Lfunc_end1-_Z26__device_stub__incOnDevicePfi
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $96, %rsp
.cfi_def_cfa_offset 128
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $192, %edi
callq malloc
movq %rax, %rbx
movl $192, %edi
callq malloc
movq %rax, %r14
leaq 8(%rsp), %rdi
movl $192, %esi
callq hipMalloc
xorl %eax, %eax
.p2align 4, 0x90
.LBB2_1: # =>This Inner Loop Header: Depth=1
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%rbx,%rax,4)
incq %rax
cmpq $48, %rax
jne .LBB2_1
# %bb.2:
movq 8(%rsp), %rdi
movl $192, %edx
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movss (%rbx), %xmm0 # xmm0 = mem[0],zero,zero,zero
movl $1, %eax
.p2align 4, 0x90
.LBB2_3: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
addss (%rbx,%rax,4), %xmm0
movss %xmm0, (%rbx,%rax,4)
incq %rax
cmpq $48, %rax
jne .LBB2_3
# %bb.4: # %_Z9incOnHostPfi.exit
movabsq $4294967300, %rdx # imm = 0x100000004
leaq 8(%rdx), %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_6
# %bb.5:
movq 8(%rsp), %rax
movq %rax, 72(%rsp)
movl $48, 20(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 20(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z11incOnDevicePfi, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_6:
movq 8(%rsp), %rsi
movl $192, %edx
movq %r14, %rdi
movl $2, %ecx
callq hipMemcpy
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB2_7: # =>This Inner Loop Header: Depth=1
movss (%r14,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %edi
movl %r15d, %esi
movb $1, %al
callq printf
incq %r15
cmpq $48, %r15
jne .LBB2_7
# %bb.8:
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq 8(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $96, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11incOnDevicePfi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z11incOnDevicePfi,@object # @_Z11incOnDevicePfi
.section .rodata,"a",@progbits
.globl _Z11incOnDevicePfi
.p2align 3, 0x0
_Z11incOnDevicePfi:
.quad _Z26__device_stub__incOnDevicePfi
.size _Z11incOnDevicePfi, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "value from device %d = %1.1f\n"
.size .L.str, 30
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z11incOnDevicePfi"
.size .L__unnamed_1, 19
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z26__device_stub__incOnDevicePfi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z11incOnDevicePfi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <iostream>
#include <numeric>
#include <stdlib.h>
#include <stdio.h>
#include<thrust/scan.h>
/*
Something confuses me: why can't I get the same correct result every time?
*/
/*
These two kernels can be used on large arrays, but they are slow.
Best advice: call __syncthreads() before reading an element that was written under a different index.
*/
// Hillis-Steele style forward scan: every pass adds each element into the
// slot `step` positions ahead of it, doubling `step` until it spans the array.
// BUG(review): the early `return` makes threads leave the loop at different
// iterations, so later __syncthreads() barriers are reached by only part of
// the block — a barrier inside divergent control flow is undefined behavior.
// NOTE(review): __syncthreads() cannot order accesses between blocks, yet
// d_out is read and written grid-wide; with more than one block this races,
// which would explain the irreproducible results noted at the top of the file.
__global__ void hillis_steele_scan_forward(float * d_out, float * d_in, const int array_size){
int idx = blockDim.x * blockIdx.x + threadIdx.x; // flat global index; no idx < array_size guard
d_out[idx] = d_in[idx];
for(int step = 1; step < array_size; step *= 2){
if(idx + step >= array_size) return; // nothing ahead of this thread to update
__syncthreads();
float in1 = d_out[idx]; // snapshot before neighbours overwrite this slot
__syncthreads();
d_out[idx + step] += in1;
}
}
// Hillis-Steele style backward scan: each element repeatedly pulls in the
// value `step` positions behind it, doubling `step` while step <= idx.
// BUG(review): the loop trip count depends on idx, so threads execute the
// __syncthreads() barriers a different number of times — undefined behavior.
// NOTE(review): there is no array_size parameter, so every launched thread
// writes d_out[idx]; a grid larger than the array writes out of bounds (the
// commented-out launch in main() uses 3 blocks x 512 threads for 1025 floats).
__global__ void hillis_steele_scan_backward(float * d_out, float * d_in){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
d_out[idx] = d_in[idx];
for(int step = 1; step <= idx; step *= 2){
if(idx - step < 0) return; // dead code: step <= idx already guarantees idx - step >= 0
__syncthreads();
float in1 = d_out[idx - step];
__syncthreads();
d_out[idx] += in1;
}
}
/*
These two kernels can be used on small (single-block) arrays, but they are fast.
*/
// Shared-memory variant of the forward scan: stages d_in in dynamic shared
// memory (sdata), scans it in place, then writes the result to d_out.
// NOTE(review): idx is threadIdx.x only, so this is meaningful only for a
// single-block launch; every additional block would redo the same elements.
// BUG(review): both early `return`s leave later __syncthreads() calls inside
// divergent control flow (undefined behavior), and a thread that returns from
// inside the loop never executes the final d_out[idx] store, so its output
// slot is left unwritten.
__global__ void shared_hillis_steele_scan_forward(float *d_out, float *d_in, const int array_size) {
extern __shared__ float sdata[]; // must be >= array_size floats (third launch argument)
int idx = threadIdx.x;
if(idx < array_size) {
sdata[idx] = d_in[idx];
} else {
return;
} // the code below performs the iterative scan on sdata
for(int step = 1; step < array_size; step *= 2){
if(idx + step >= array_size) return;
__syncthreads();
float in1 = sdata[idx];
__syncthreads();
sdata[idx + step] += in1;
}
d_out[idx] = sdata[idx];
}
// Shared-memory variant of the backward scan (see the forward version above).
// NOTE(review): idx is threadIdx.x, so only a single-block launch is meaningful.
// BUG(review): the per-thread loop trip count (step <= idx) diverges, so the
// __syncthreads() barriers inside the loop are not reached uniformly by the
// block — undefined behavior.
__global__ void shared_hillis_steele_scan_backward(float * d_out, float * d_in, const int array_size){
extern __shared__ float sdata[]; // must be >= array_size floats (third launch argument)
int idx = threadIdx.x;
if(idx < array_size) {
sdata[idx] = d_in[idx];
} else {
return; // out-of-range threads exit before the barriers below (divergent)
}
sdata[idx] = d_in[idx]; // redundant: duplicates the store just above
for(int step = 1; step <= idx; step *= 2){
if(idx - step < 0) return; // dead code: step <= idx guarantees idx - step >= 0
__syncthreads();
float in1 = sdata[idx - step];
__syncthreads();
sdata[idx] += in1;
}
d_out[idx] = sdata[idx];
}
/*
This kernel only produces the correct result when the array size is a power of 2.
*/
// Classic Blelloch work-efficient exclusive scan: an up-sweep builds partial
// sums in a binary tree, thread 0 clears the root, and a down-sweep
// distributes the prefixes back down the tree.
// Preconditions (not checked): single-block launch with blockDim.x == n,
// n a power of two, and n * sizeof(float) bytes of dynamic shared memory —
// temp[thid] = g_idata[thid] only initializes one element per thread.
// Unlike the kernels above, every __syncthreads() here is executed by all
// threads of the block (the divergent work sits inside `if (thid < d)`).
__global__ void blelloch_exclusive_scan(float *g_odata, float *g_idata, int n)
{
extern __shared__ float temp[];// allocated on invocation
int thid = threadIdx.x;
int offset = 1; // distance between the two children combined at this level
temp[thid] = g_idata[thid]; // one element per thread; unguarded load/store
for (int d = n / 2; d > 0; d /= 2) // build sum in place up the tree
{
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
temp[bi] += temp[ai];
}
offset <<= 1; //multiply by 2 implemented as bitwise operation
}
if (thid == 0) { temp[n - 1] = 0; } // clear the last element
for (int d = 1; d < n; d *= 2) // traverse down tree & build scan
{
offset >>= 1;
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
float t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
g_odata[thid] = temp[thid];
}
// Host driver. All GPU kernel launches are commented out; the printed result
// comes from thrust::inclusive_scan over the host arrays (raw host pointers
// make Thrust dispatch its sequential host backend).
// NOTE(review): the return values of cudaMalloc/cudaMemcpy/cudaFree are never
// checked, and d_out is allocated but never read back (that memcpy is
// commented out too).
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 1025; // deliberately not a power of two nor a multiple of the block size
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
const int maxThreadPerBlock = 512;
const int numBlock = ARRAY_SIZE / maxThreadPerBlock + 1; // = 3 blocks -> 1536 threads for 1025 elements
// generate the input array on the host
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare GPU memory pointers
float * d_in;
float * d_out;
// allocate GPU memory (return values unchecked)
cudaMalloc((void**) &d_in, ARRAY_BYTES);
cudaMalloc((void**) &d_out, ARRAY_BYTES);
// transfer the array to the GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
// launch the kernel
//hillis_steele_scan_forward<<<numBlock, maxThreadPerBlock>>>(d_out, d_in, ARRAY_SIZE);
//hillis_steele_scan_backward<<<numBlock, maxThreadPerBlock>>>(d_out, d_in);
//shared_hillis_steele_scan_forward<<<numBlock, maxThreadPerBlock, maxThreadPerBlock * sizeof(float)>>>(d_out, d_in, ARRAY_SIZE);
//shared_hillis_steele_scan_backward<<<numBlock, maxThreadPerBlock, maxThreadPerBlock * sizeof(float)>>>(d_out, d_in, ARRAY_SIZE);
//blelloch_exclusive_scan<<<numBlock, maxThreadPerBlock, maxThreadPerBlock * sizeof(float)>>>(d_out, d_in, ARRAY_SIZE);
thrust::inclusive_scan(h_in, h_in + ARRAY_SIZE, h_out); // runs on the CPU (host pointers)
//copy back the result array to the CPU
//cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
// print out the resulting array, four values per line
for (int i =0; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
cudaFree(d_in);
cudaFree(d_out);
return 0;
} | code for sm_80
Function : _ZN3cub17CUB_200700_800_NS11EmptyKernelIvEEvv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z23blelloch_exclusive_scanPfS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e220000002100 */
/*0020*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0040*/ IMAD.WIDE R2, R0, R3, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x001fcc00078e0203 */
/*0050*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*0060*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff047624 */
/* 0x000fe200078e00ff */
/*0070*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fc80003f05270 */
/*0080*/ ISETP.GE.AND P1, PT, R4, 0x2, PT ; /* 0x000000020400780c */
/* 0x000fe20003f26270 */
/*0090*/ IMAD.MOV.U32 R4, RZ, RZ, 0x1 ; /* 0x00000001ff047424 */
/* 0x000fd000078e00ff */
/*00a0*/ @!P0 MOV R8, c[0x0][0x170] ; /* 0x00005c0000088a02 */
/* 0x000fc80000000f00 */
/*00b0*/ @!P0 LEA R8, R8, 0xfffffffc, 0x2 ; /* 0xfffffffc08088811 */
/* 0x000fe200078e10ff */
/*00c0*/ STS [R0.X4], R3 ; /* 0x0000000300007388 */
/* 0x0041e20000004800 */
/*00d0*/ @!P1 BRA 0x200 ; /* 0x0000012000009947 */
/* 0x000fea0003800000 */
/*00e0*/ LEA R3, R0, 0x1, 0x1 ; /* 0x0000000100037811 */
/* 0x001fe200078e08ff */
/*00f0*/ IMAD.MOV.U32 R4, RZ, RZ, 0x1 ; /* 0x00000001ff047424 */
/* 0x000fe400078e00ff */
/*0100*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff027624 */
/* 0x000fca00078e00ff */
/*0110*/ LEA.HI R5, R2, R2, RZ, 0x1 ; /* 0x0000000202057211 */
/* 0x000fe200078f08ff */
/*0120*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe60000010000 */
/*0130*/ SHF.R.S32.HI R9, RZ, 0x1, R5 ; /* 0x00000001ff097819 */
/* 0x000fc80000011405 */
/*0140*/ ISETP.GE.AND P2, PT, R0, R9, PT ; /* 0x000000090000720c */
/* 0x000fda0003f46270 */
/*0150*/ @!P2 IMAD R5, R3, R4, RZ ; /* 0x000000040305a224 */
/* 0x000fca00078e02ff */
/*0160*/ @!P2 SHF.L.U32 R7, R5, 0x2, RZ ; /* 0x000000020507a819 */
/* 0x000fe400000006ff */
/*0170*/ @!P2 LDS R5, [R5.X4+-0x4] ; /* 0xfffffc000505a984 */
/* 0x000fe60000004800 */
/*0180*/ @!P2 IMAD R7, R4.reuse, 0x4, R7 ; /* 0x000000040407a824 */
/* 0x040fe200078e0207 */
/*0190*/ SHF.L.U32 R4, R4, 0x1, RZ ; /* 0x0000000104047819 */
/* 0x000fc800000006ff */
/*01a0*/ @!P2 LDS R6, [R7+-0x4] ; /* 0xfffffc000706a984 */
/* 0x000e240000000800 */
/*01b0*/ @!P2 FADD R6, R6, R5 ; /* 0x000000050606a221 */
/* 0x001fca0000000000 */
/*01c0*/ @!P2 STS [R7+-0x4], R6 ; /* 0xfffffc060700a388 */
/* 0x0001e20000000800 */
/*01d0*/ ISETP.GT.AND P2, PT, R2, 0x3, PT ; /* 0x000000030200780c */
/* 0x000fe20003f44270 */
/*01e0*/ IMAD.MOV.U32 R2, RZ, RZ, R9 ; /* 0x000000ffff027224 */
/* 0x000fd800078e0009 */
/*01f0*/ @P2 BRA 0x110 ; /* 0xffffff1000002947 */
/* 0x001fea000383ffff */
/*0200*/ @!P0 STS [R8], RZ ; /* 0x000000ff08008388 */
/* 0x0003e20000000800 */
/*0210*/ SHF.R.S32.HI R9, RZ, 0x1f, R0 ; /* 0x0000001fff097819 */
/* 0x000fe20000011400 */
/*0220*/ @!P1 BRA 0x370 ; /* 0x0000014000009947 */
/* 0x000fea0003800000 */
/*0230*/ LEA R11, R0, 0x1, 0x1 ; /* 0x00000001000b7811 */
/* 0x000fe200078e08ff */
/*0240*/ IMAD.MOV.U32 R3, RZ, RZ, 0x1 ; /* 0x00000001ff037424 */
/* 0x001fc800078e00ff */
/*0250*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0260*/ ISETP.GE.AND P0, PT, R0, R3, PT ; /* 0x000000030000720c */
/* 0x000fe20003f06270 */
/*0270*/ IMAD.SHL.U32 R3, R3, 0x2, RZ ; /* 0x0000000203037824 */
/* 0x000fe200078e00ff */
/*0280*/ SHF.R.S32.HI R4, RZ, 0x1, R4 ; /* 0x00000001ff047819 */
/* 0x000fc60000011404 */
/*0290*/ BSSY B0, 0x360 ; /* 0x000000c000007945 */
/* 0x000fe20003800000 */
/*02a0*/ ISETP.GE.AND P1, PT, R3, c[0x0][0x170], PT ; /* 0x00005c0003007a0c */
/* 0x000fce0003f26270 */
/*02b0*/ @P0 BRA 0x350 ; /* 0x0000009000000947 */
/* 0x001fea0003800000 */
/*02c0*/ IMAD R2, R4, R11, RZ ; /* 0x0000000b04027224 */
/* 0x000fca00078e02ff */
/*02d0*/ SHF.L.U32 R5, R2, 0x2, RZ ; /* 0x0000000202057819 */
/* 0x000fca00000006ff */
/*02e0*/ IMAD R8, R4, 0x4, R5 ; /* 0x0000000404087824 */
/* 0x002fe400078e0205 */
/*02f0*/ LDS R5, [R2.X4+-0x4] ; /* 0xfffffc0002057984 */
/* 0x000fe80000004800 */
/*0300*/ LDS R7, [R8+-0x4] ; /* 0xfffffc0008077984 */
/* 0x000e280000000800 */
/*0310*/ STS [R2.X4+-0x4], R7 ; /* 0xfffffc0702007388 */
/* 0x001fe80000004800 */
/*0320*/ LDS R6, [R8+-0x4] ; /* 0xfffffc0008067984 */
/* 0x000e240000000800 */
/*0330*/ FADD R5, R5, R6 ; /* 0x0000000605057221 */
/* 0x001fca0000000000 */
/*0340*/ STS [R8+-0x4], R5 ; /* 0xfffffc0508007388 */
/* 0x0001e40000000800 */
/*0350*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0360*/ @!P1 BRA 0x250 ; /* 0xfffffee000009947 */
/* 0x000fea000383ffff */
/*0370*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0380*/ LEA R2, P0, R0, c[0x0][0x160], 0x2 ; /* 0x0000580000027a11 */
/* 0x000fc800078010ff */
/*0390*/ LEA.HI.X R3, R0, c[0x0][0x164], R9, 0x2, P0 ; /* 0x0000590000037a11 */
/* 0x001fe200000f1409 */
/*03a0*/ LDS R5, [R0.X4] ; /* 0x0000000000057984 */
/* 0x000e280000004800 */
/*03b0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101904 */
/*03c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*03d0*/ BRA 0x3d0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*03e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0400*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0410*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0420*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0430*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0440*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0450*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0460*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0470*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z34shared_hillis_steele_scan_backwardPfS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R4, SR_TID.X ; /* 0x0000000000047919 */
/* 0x000e240000002100 */
/*0020*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x170], PT ; /* 0x00005c0004007a0c */
/* 0x001fda0003f06270 */
/*0030*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0040*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*0050*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0060*/ IMAD.WIDE R2, R4, R3, c[0x0][0x168] ; /* 0x00005a0004027625 */
/* 0x000fca00078e0203 */
/*0070*/ LDG.E R5, [R2.64] ; /* 0x0000000402057981 */
/* 0x000ea2000c1e1900 */
/*0080*/ ISETP.GE.AND P0, PT, R4, 0x1, PT ; /* 0x000000010400780c */
/* 0x000fe40003f06270 */
/*0090*/ SHF.R.S32.HI R7, RZ, 0x1f, R4 ; /* 0x0000001fff077819 */
/* 0x000fe20000011404 */
/*00a0*/ STS [R4.X4], R5 ; /* 0x0000000504007388 */
/* 0x0041f40000004800 */
/*00b0*/ @!P0 BRA 0x180 ; /* 0x000000c000008947 */
/* 0x000fea0003800000 */
/*00c0*/ HFMA2.MMA R0, -RZ, RZ, 0, 5.9604644775390625e-08 ; /* 0x00000001ff007435 */
/* 0x000fd000000001ff */
/*00d0*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */
/* 0x000fe20003800000 */
/*00e0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*00f0*/ IMAD.IADD R2, R4, 0x1, -R0 ; /* 0x0000000104027824 */
/* 0x000fe200078e0a00 */
/*0100*/ SHF.L.U32 R0, R0, 0x1, RZ ; /* 0x0000000100007819 */
/* 0x000fc800000006ff */
/*0110*/ ISETP.GE.AND P0, PT, R4, R0, PT ; /* 0x000000000400720c */
/* 0x000fe20003f06270 */
/*0120*/ LDS R2, [R2.X4] ; /* 0x0000000002027984 */
/* 0x000fe80000004800 */
/*0130*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0140*/ LDS R3, [R4.X4] ; /* 0x0000000004037984 */
/* 0x000e640000004800 */
/*0150*/ FADD R5, R2, R3 ; /* 0x0000000302057221 */
/* 0x003fca0000000000 */
/*0160*/ STS [R4.X4], R5 ; /* 0x0000000504007388 */
/* 0x0001e20000004800 */
/*0170*/ @P0 BRA 0xd0 ; /* 0xffffff5000000947 */
/* 0x000fea000383ffff */
/*0180*/ LEA R2, P0, R4, c[0x0][0x160], 0x2 ; /* 0x0000580004027a11 */
/* 0x000fc800078010ff */
/*0190*/ LEA.HI.X R3, R4, c[0x0][0x164], R7, 0x2, P0 ; /* 0x0000590004037a11 */
/* 0x000fca00000f1407 */
/*01a0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*01b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01c0*/ BRA 0x1c0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z33shared_hillis_steele_scan_forwardPfS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e240000002100 */
/*0020*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x170], PT ; /* 0x00005c0006007a0c */
/* 0x001fda0003f06270 */
/*0030*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0040*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*0050*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0060*/ IMAD.WIDE R2, R6, R3, c[0x0][0x168] ; /* 0x00005a0006027625 */
/* 0x000fca00078e0203 */
/*0070*/ LDG.E R5, [R2.64] ; /* 0x0000000402057981 */
/* 0x000ea2000c1e1900 */
/*0080*/ MOV R0, c[0x0][0x170] ; /* 0x00005c0000007a02 */
/* 0x000fe40000000f00 */
/*0090*/ SHF.R.S32.HI R7, RZ, 0x1f, R6 ; /* 0x0000001fff077819 */
/* 0x000fe40000011406 */
/*00a0*/ ISETP.GE.AND P0, PT, R0, 0x2, PT ; /* 0x000000020000780c */
/* 0x000fe20003f06270 */
/*00b0*/ STS [R6.X4], R5 ; /* 0x0000000506007388 */
/* 0x0041d80000004800 */
/*00c0*/ @!P0 BRA 0x1d0 ; /* 0x0000010000008947 */
/* 0x000fea0003800000 */
/*00d0*/ IMAD.SHL.U32 R0, R6, 0x4, RZ ; /* 0x0000000406007824 */
/* 0x000fe400078e00ff */
/*00e0*/ IMAD.MOV.U32 R3, RZ, RZ, 0x1 ; /* 0x00000001ff037424 */
/* 0x000fca00078e00ff */
/*00f0*/ IADD3 R2, R6, R3, RZ ; /* 0x0000000306027210 */
/* 0x000fc80007ffe0ff */
/*0100*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x170], PT ; /* 0x00005c0002007a0c */
/* 0x000fda0003f06270 */
/*0110*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x001fea0003800000 */
/*0120*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0130*/ IMAD R4, R3.reuse, 0x4, R0 ; /* 0x0000000403047824 */
/* 0x040fe200078e0200 */
/*0140*/ SHF.L.U32 R3, R3, 0x1, RZ ; /* 0x0000000103037819 */
/* 0x000fc800000006ff */
/*0150*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x170], PT ; /* 0x00005c0003007a0c */
/* 0x000fe20003f06270 */
/*0160*/ LDS R2, [R6.X4] ; /* 0x0000000006027984 */
/* 0x000fe80000004800 */
/*0170*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0180*/ LDS R5, [R4] ; /* 0x0000000004057984 */
/* 0x001e240000000800 */
/*0190*/ FADD R5, R2, R5 ; /* 0x0000000502057221 */
/* 0x001fca0000000000 */
/*01a0*/ STS [R4], R5 ; /* 0x0000000504007388 */
/* 0x0001e20000000800 */
/*01b0*/ @!P0 BRA 0xf0 ; /* 0xffffff3000008947 */
/* 0x000fea000383ffff */
/*01c0*/ LDS R5, [R6.X4] ; /* 0x0000000006057984 */
/* 0x0010640000004800 */
/*01d0*/ LEA R2, P0, R6, c[0x0][0x160], 0x2 ; /* 0x0000580006027a11 */
/* 0x000fc800078010ff */
/*01e0*/ LEA.HI.X R3, R6, c[0x0][0x164], R7, 0x2, P0 ; /* 0x0000590006037a11 */
/* 0x000fca00000f1407 */
/*01f0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x002fe2000c101904 */
/*0200*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0210*/ BRA 0x210; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z27hillis_steele_scan_backwardPfS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R11, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff0b7435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0060*/ IMAD.WIDE R2, R0, R11, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fcc00078e020b */
/*0070*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*0080*/ ISETP.GE.AND P0, PT, R0.reuse, 0x1, PT ; /* 0x000000010000780c */
/* 0x040fe20003f06270 */
/*0090*/ IMAD.WIDE R4, R0, R11, c[0x0][0x160] ; /* 0x0000580000047625 */
/* 0x000fca00078e020b */
/*00a0*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */
/* 0x0041ee000c101904 */
/*00b0*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*00c0*/ MOV R7, 0x1 ; /* 0x0000000100077802 */
/* 0x000fe40000000f00 */
/*00d0*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */
/* 0x000fe20003800000 */
/*00e0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*00f0*/ IADD3 R2, R0, -R7, RZ ; /* 0x8000000700027210 */
/* 0x000fca0007ffe0ff */
/*0100*/ IMAD.WIDE R2, R2, R11, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x001fcc00078e020b */
/*0110*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0120*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0130*/ LDG.E R9, [R4.64] ; /* 0x0000000404097981 */
/* 0x000ea2000c1e1900 */
/*0140*/ SHF.L.U32 R7, R7, 0x1, RZ ; /* 0x0000000107077819 */
/* 0x000fc800000006ff */
/*0150*/ ISETP.GE.AND P0, PT, R0, R7, PT ; /* 0x000000070000720c */
/* 0x000fe20003f06270 */
/*0160*/ FADD R9, R2, R9 ; /* 0x0000000902097221 */
/* 0x004fca0000000000 */
/*0170*/ STG.E [R4.64], R9 ; /* 0x0000000904007986 */
/* 0x0001ee000c101904 */
/*0180*/ @P0 BRA 0xd0 ; /* 0xffffff4000000947 */
/* 0x000fea000383ffff */
/*0190*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01a0*/ BRA 0x1a0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z26hillis_steele_scan_forwardPfS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R11, RZ, RZ, 0x4 ; /* 0x00000004ff0b7424 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fc800078e0203 */
/*0060*/ IMAD.WIDE R2, R0, R11, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fcc00078e020b */
/*0070*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IADD3 R6, R0.reuse, 0x1, RZ ; /* 0x0000000100067810 */
/* 0x040fe20007ffe0ff */
/*0090*/ IMAD.WIDE R4, R0, R11, c[0x0][0x160] ; /* 0x0000580000047625 */
/* 0x000fe200078e020b */
/*00a0*/ MOV R7, c[0x0][0x170] ; /* 0x00005c0000077a02 */
/* 0x000fe40000000f00 */
/*00b0*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x170], PT ; /* 0x00005c0006007a0c */
/* 0x000fc80003f06270 */
/*00c0*/ ISETP.LT.OR P0, PT, R7, 0x2, P0 ; /* 0x000000020700780c */
/* 0x000fe20000701670 */
/*00d0*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */
/* 0x0041d8000c101904 */
/*00e0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00f0*/ IMAD.MOV.U32 R7, RZ, RZ, 0x1 ; /* 0x00000001ff077424 */
/* 0x000fe400078e00ff */
/*0100*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */
/* 0x000fe20003800000 */
/*0110*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0120*/ IMAD.WIDE R2, R6, R11, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x001fca00078e020b */
/*0130*/ LDG.E R6, [R4.64] ; /* 0x0000000404067981 */
/* 0x000ea8000c1e1900 */
/*0140*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0150*/ LDG.E R9, [R2.64] ; /* 0x0000000402097981 */
/* 0x000ea2000c1e1900 */
/*0160*/ SHF.L.U32 R7, R7, 0x1, RZ ; /* 0x0000000107077819 */
/* 0x000fc800000006ff */
/*0170*/ ISETP.LT.AND P1, PT, R7, c[0x0][0x170], PT ; /* 0x00005c0007007a0c */
/* 0x000fe20003f21270 */
/*0180*/ FADD R9, R6, R9 ; /* 0x0000000906097221 */
/* 0x004fe20000000000 */
/*0190*/ IADD3 R6, R0, R7, RZ ; /* 0x0000000700067210 */
/* 0x000fc80007ffe0ff */
/*01a0*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */
/* 0x0001e2000c101904 */
/*01b0*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x170], PT ; /* 0x00005c0006007a0c */
/* 0x000fda0003f06270 */
/*01c0*/ @!P0 BRA P1, 0x100 ; /* 0xffffff3000008947 */
/* 0x001fea000083ffff */
/*01d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01e0*/ BRA 0x1e0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <iostream>
#include <numeric>
#include <stdlib.h>
#include <stdio.h>
#include<thrust/scan.h>
/*
Something confuses me: why can't I get the same correct result every time?
*/
/*
These two kernel could be used on large array, but slow
Best advice: use __syncthreads() before you want to use different index
*/
// Hillis-Steele style forward scan: every pass adds each element into the
// slot `step` positions ahead of it, doubling `step` until it spans the array.
// BUG(review): the early `return` makes threads leave the loop at different
// iterations, so later __syncthreads() barriers are reached by only part of
// the block — a barrier inside divergent control flow is undefined behavior.
// NOTE(review): __syncthreads() cannot order accesses between blocks, yet
// d_out is read and written grid-wide; with more than one block this races,
// which would explain the irreproducible results noted at the top of the file.
__global__ void hillis_steele_scan_forward(float * d_out, float * d_in, const int array_size){
int idx = blockDim.x * blockIdx.x + threadIdx.x; // flat global index; no idx < array_size guard
d_out[idx] = d_in[idx];
for(int step = 1; step < array_size; step *= 2){
if(idx + step >= array_size) return; // nothing ahead of this thread to update
__syncthreads();
float in1 = d_out[idx]; // snapshot before neighbours overwrite this slot
__syncthreads();
d_out[idx + step] += in1;
}
}
// Hillis-Steele style backward scan: each element repeatedly pulls in the
// value `step` positions behind it, doubling `step` while step <= idx.
// BUG(review): the loop trip count depends on idx, so threads execute the
// __syncthreads() barriers a different number of times — undefined behavior.
// NOTE(review): there is no array_size parameter, so every launched thread
// writes d_out[idx]; a grid larger than the array writes out of bounds (the
// commented-out launch in main() uses 3 blocks x 512 threads for 1025 floats).
__global__ void hillis_steele_scan_backward(float * d_out, float * d_in){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
d_out[idx] = d_in[idx];
for(int step = 1; step <= idx; step *= 2){
if(idx - step < 0) return; // dead code: step <= idx already guarantees idx - step >= 0
__syncthreads();
float in1 = d_out[idx - step];
__syncthreads();
d_out[idx] += in1;
}
}
/*
These two kernel could be used on small array, but fast
*/
// Shared-memory variant of the forward scan: stages d_in in dynamic shared
// memory (sdata), scans it in place, then writes the result to d_out.
// NOTE(review): idx is threadIdx.x only, so this is meaningful only for a
// single-block launch; every additional block would redo the same elements.
// BUG(review): both early `return`s leave later __syncthreads() calls inside
// divergent control flow (undefined behavior), and a thread that returns from
// inside the loop never executes the final d_out[idx] store, so its output
// slot is left unwritten.
__global__ void shared_hillis_steele_scan_forward(float *d_out, float *d_in, const int array_size) {
extern __shared__ float sdata[]; // must be >= array_size floats (third launch argument)
int idx = threadIdx.x;
if(idx < array_size) {
sdata[idx] = d_in[idx];
} else {
return;
} // the code below performs the iterative scan on sdata
for(int step = 1; step < array_size; step *= 2){
if(idx + step >= array_size) return;
__syncthreads();
float in1 = sdata[idx];
__syncthreads();
sdata[idx + step] += in1;
}
d_out[idx] = sdata[idx];
}
// Shared-memory variant of the backward scan (see the forward version above).
// NOTE(review): idx is threadIdx.x, so only a single-block launch is meaningful.
// BUG(review): the per-thread loop trip count (step <= idx) diverges, so the
// __syncthreads() barriers inside the loop are not reached uniformly by the
// block — undefined behavior.
__global__ void shared_hillis_steele_scan_backward(float * d_out, float * d_in, const int array_size){
extern __shared__ float sdata[]; // must be >= array_size floats (third launch argument)
int idx = threadIdx.x;
if(idx < array_size) {
sdata[idx] = d_in[idx];
} else {
return; // out-of-range threads exit before the barriers below (divergent)
}
sdata[idx] = d_in[idx]; // redundant: duplicates the store just above
for(int step = 1; step <= idx; step *= 2){
if(idx - step < 0) return; // dead code: step <= idx guarantees idx - step >= 0
__syncthreads();
float in1 = sdata[idx - step];
__syncthreads();
sdata[idx] += in1;
}
d_out[idx] = sdata[idx];
}
/*
This kernel will get correct result when array size is power of 2
*/
// Classic Blelloch work-efficient exclusive scan: an up-sweep builds partial
// sums in a binary tree, thread 0 clears the root, and a down-sweep
// distributes the prefixes back down the tree.
// Preconditions (not checked): single-block launch with blockDim.x == n,
// n a power of two, and n * sizeof(float) bytes of dynamic shared memory —
// temp[thid] = g_idata[thid] only initializes one element per thread.
// Unlike the kernels above, every __syncthreads() here is executed by all
// threads of the block (the divergent work sits inside `if (thid < d)`).
__global__ void blelloch_exclusive_scan(float *g_odata, float *g_idata, int n)
{
extern __shared__ float temp[];// allocated on invocation
int thid = threadIdx.x;
int offset = 1; // distance between the two children combined at this level
temp[thid] = g_idata[thid]; // one element per thread; unguarded load/store
for (int d = n / 2; d > 0; d /= 2) // build sum in place up the tree
{
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
temp[bi] += temp[ai];
}
offset <<= 1; //multiply by 2 implemented as bitwise operation
}
if (thid == 0) { temp[n - 1] = 0; } // clear the last element
for (int d = 1; d < n; d *= 2) // traverse down tree & build scan
{
offset >>= 1;
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
float t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
g_odata[thid] = temp[thid];
}
// Host driver. All GPU kernel launches are commented out; the printed result
// comes from thrust::inclusive_scan over the host arrays (raw host pointers
// make Thrust dispatch its sequential host backend).
// NOTE(review): the return values of cudaMalloc/cudaMemcpy/cudaFree are never
// checked, and d_out is allocated but never read back (that memcpy is
// commented out too).
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 1025; // deliberately not a power of two nor a multiple of the block size
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
const int maxThreadPerBlock = 512;
const int numBlock = ARRAY_SIZE / maxThreadPerBlock + 1; // = 3 blocks -> 1536 threads for 1025 elements
// generate the input array on the host
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare GPU memory pointers
float * d_in;
float * d_out;
// allocate GPU memory (return values unchecked)
cudaMalloc((void**) &d_in, ARRAY_BYTES);
cudaMalloc((void**) &d_out, ARRAY_BYTES);
// transfer the array to the GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
// launch the kernel
//hillis_steele_scan_forward<<<numBlock, maxThreadPerBlock>>>(d_out, d_in, ARRAY_SIZE);
//hillis_steele_scan_backward<<<numBlock, maxThreadPerBlock>>>(d_out, d_in);
//shared_hillis_steele_scan_forward<<<numBlock, maxThreadPerBlock, maxThreadPerBlock * sizeof(float)>>>(d_out, d_in, ARRAY_SIZE);
//shared_hillis_steele_scan_backward<<<numBlock, maxThreadPerBlock, maxThreadPerBlock * sizeof(float)>>>(d_out, d_in, ARRAY_SIZE);
//blelloch_exclusive_scan<<<numBlock, maxThreadPerBlock, maxThreadPerBlock * sizeof(float)>>>(d_out, d_in, ARRAY_SIZE);
thrust::inclusive_scan(h_in, h_in + ARRAY_SIZE, h_out); // runs on the CPU (host pointers)
//copy back the result array to the CPU
//cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
// print out the resulting array, four values per line
for (int i =0; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
cudaFree(d_in);
cudaFree(d_out);
return 0;
} | #include <hip/hip_runtime.h>
#include <iostream>
#include <numeric>
#include <stdlib.h>
#include <stdio.h>
#include<thrust/scan.h>
/*
Something confuses me: why can't I get the same correct result every time?
*/
/*
These two kernel could be used on large array, but slow
Best advice: use __syncthreads() before you want to use different index
*/
// Hillis-Steele style forward scan: every pass adds each element into the
// slot `step` positions ahead of it, doubling `step` until it spans the array.
// BUG(review): the early `return` makes threads leave the loop at different
// iterations, so later __syncthreads() barriers are reached by only part of
// the block — a barrier inside divergent control flow is undefined behavior.
// NOTE(review): __syncthreads() cannot order accesses between blocks, yet
// d_out is read and written grid-wide; with more than one block this races,
// which would explain the irreproducible results noted at the top of the file.
__global__ void hillis_steele_scan_forward(float * d_out, float * d_in, const int array_size){
int idx = blockDim.x * blockIdx.x + threadIdx.x; // flat global index; no idx < array_size guard
d_out[idx] = d_in[idx];
for(int step = 1; step < array_size; step *= 2){
if(idx + step >= array_size) return; // nothing ahead of this thread to update
__syncthreads();
float in1 = d_out[idx]; // snapshot before neighbours overwrite this slot
__syncthreads();
d_out[idx + step] += in1;
}
}
// Hillis-Steele style backward scan: each element repeatedly pulls in the
// value `step` positions behind it, doubling `step` while step <= idx.
// BUG(review): the loop trip count depends on idx, so threads execute the
// __syncthreads() barriers a different number of times — undefined behavior.
// NOTE(review): there is no array_size parameter, so every launched thread
// writes d_out[idx]; a grid larger than the array writes out of bounds (the
// commented-out launch in main() uses 3 blocks x 512 threads for 1025 floats).
__global__ void hillis_steele_scan_backward(float * d_out, float * d_in){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
d_out[idx] = d_in[idx];
for(int step = 1; step <= idx; step *= 2){
if(idx - step < 0) return; // dead code: step <= idx already guarantees idx - step >= 0
__syncthreads();
float in1 = d_out[idx - step];
__syncthreads();
d_out[idx] += in1;
}
}
/*
These two kernel could be used on small array, but fast
*/
// Shared-memory variant of the forward scan: stages d_in in dynamic shared
// memory (sdata), scans it in place, then writes the result to d_out.
// NOTE(review): idx is threadIdx.x only, so this is meaningful only for a
// single-block launch; every additional block would redo the same elements.
// BUG(review): both early `return`s leave later __syncthreads() calls inside
// divergent control flow (undefined behavior), and a thread that returns from
// inside the loop never executes the final d_out[idx] store, so its output
// slot is left unwritten.
__global__ void shared_hillis_steele_scan_forward(float *d_out, float *d_in, const int array_size) {
extern __shared__ float sdata[]; // must be >= array_size floats (third launch argument)
int idx = threadIdx.x;
if(idx < array_size) {
sdata[idx] = d_in[idx];
} else {
return;
} // the code below performs the iterative scan on sdata
for(int step = 1; step < array_size; step *= 2){
if(idx + step >= array_size) return;
__syncthreads();
float in1 = sdata[idx];
__syncthreads();
sdata[idx + step] += in1;
}
d_out[idx] = sdata[idx];
}
// Shared-memory variant of the backward scan (see the forward version above).
// NOTE(review): idx is threadIdx.x, so only a single-block launch is meaningful.
// BUG(review): the per-thread loop trip count (step <= idx) diverges, so the
// __syncthreads() barriers inside the loop are not reached uniformly by the
// block — undefined behavior.
__global__ void shared_hillis_steele_scan_backward(float * d_out, float * d_in, const int array_size){
extern __shared__ float sdata[]; // must be >= array_size floats (third launch argument)
int idx = threadIdx.x;
if(idx < array_size) {
sdata[idx] = d_in[idx];
} else {
return; // out-of-range threads exit before the barriers below (divergent)
}
sdata[idx] = d_in[idx]; // redundant: duplicates the store just above
for(int step = 1; step <= idx; step *= 2){
if(idx - step < 0) return; // dead code: step <= idx guarantees idx - step >= 0
__syncthreads();
float in1 = sdata[idx - step];
__syncthreads();
sdata[idx] += in1;
}
d_out[idx] = sdata[idx];
}
/*
This kernel will get correct result when array size is power of 2
*/
__global__ void blelloch_exclusive_scan(float *g_odata, float *g_idata, int n)
{
extern __shared__ float temp[];// allocated on invocation
int thid = threadIdx.x;
int offset = 1;
temp[thid] = g_idata[thid];
for (int d = n / 2; d > 0; d /= 2) // build sum in place up the tree
{
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
temp[bi] += temp[ai];
}
offset <<= 1; //multiply by 2 implemented as bitwise operation
}
if (thid == 0) { temp[n - 1] = 0; } // clear the last element
for (int d = 1; d < n; d *= 2) // traverse down tree & build scan
{
offset >>= 1;
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
float t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
g_odata[thid] = temp[thid];
}
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 1025;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
const int maxThreadPerBlock = 512;
const int numBlock = ARRAY_SIZE / maxThreadPerBlock + 1;
// generate the input array on the host
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare GPU memory pointers
float * d_in;
float * d_out;
// allocate GPU memory
hipMalloc((void**) &d_in, ARRAY_BYTES);
hipMalloc((void**) &d_out, ARRAY_BYTES);
// transfer the array to the GPU
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
// launch the kernel
//hillis_steele_scan_forward<<<numBlock, maxThreadPerBlock>>>(d_out, d_in, ARRAY_SIZE);
//hillis_steele_scan_backward<<<numBlock, maxThreadPerBlock>>>(d_out, d_in);
//shared_hillis_steele_scan_forward<<<numBlock, maxThreadPerBlock, maxThreadPerBlock * sizeof(float)>>>(d_out, d_in, ARRAY_SIZE);
//shared_hillis_steele_scan_backward<<<numBlock, maxThreadPerBlock, maxThreadPerBlock * sizeof(float)>>>(d_out, d_in, ARRAY_SIZE);
//blelloch_exclusive_scan<<<numBlock, maxThreadPerBlock, maxThreadPerBlock * sizeof(float)>>>(d_out, d_in, ARRAY_SIZE);
thrust::inclusive_scan(h_in, h_in + ARRAY_SIZE, h_out);
//copy back the result array to the CPU
//cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
// print out the resulting array
for (int i =0; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
hipFree(d_in);
hipFree(d_out);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <iostream>
#include <numeric>
#include <stdlib.h>
#include <stdio.h>
#include<thrust/scan.h>
/*
Somethings so confuse me, why i can't get same correct result every time.
*/
/*
These two kernel could be used on large array, but slow
Best advice: use __syncthreads() before you want to use different index
*/
__global__ void hillis_steele_scan_forward(float * d_out, float * d_in, const int array_size){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
d_out[idx] = d_in[idx];
for(int step = 1; step < array_size; step *= 2){
if(idx + step >= array_size) return;
__syncthreads();
float in1 = d_out[idx];
__syncthreads();
d_out[idx + step] += in1;
}
}
__global__ void hillis_steele_scan_backward(float * d_out, float * d_in){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
d_out[idx] = d_in[idx];
for(int step = 1; step <= idx; step *= 2){
if(idx - step < 0) return;
__syncthreads();
float in1 = d_out[idx - step];
__syncthreads();
d_out[idx] += in1;
}
}
/*
These two kernel could be used on small array, but fast
*/
__global__ void shared_hillis_steele_scan_forward(float *d_out, float *d_in, const int array_size) {
extern __shared__ float sdata[];
int idx = threadIdx.x;
if(idx < array_size) {
sdata[idx] = d_in[idx];
} else {
return;
} // the code below performs iterative scan on XY
for(int step = 1; step < array_size; step *= 2){
if(idx + step >= array_size) return;
__syncthreads();
float in1 = sdata[idx];
__syncthreads();
sdata[idx + step] += in1;
}
d_out[idx] = sdata[idx];
}
__global__ void shared_hillis_steele_scan_backward(float * d_out, float * d_in, const int array_size){
extern __shared__ float sdata[];
int idx = threadIdx.x;
if(idx < array_size) {
sdata[idx] = d_in[idx];
} else {
return;
}
sdata[idx] = d_in[idx];
for(int step = 1; step <= idx; step *= 2){
if(idx - step < 0) return;
__syncthreads();
float in1 = sdata[idx - step];
__syncthreads();
sdata[idx] += in1;
}
d_out[idx] = sdata[idx];
}
/*
This kernel will get correct result when array size is power of 2
*/
__global__ void blelloch_exclusive_scan(float *g_odata, float *g_idata, int n)
{
extern __shared__ float temp[];// allocated on invocation
int thid = threadIdx.x;
int offset = 1;
temp[thid] = g_idata[thid];
for (int d = n / 2; d > 0; d /= 2) // build sum in place up the tree
{
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
temp[bi] += temp[ai];
}
offset <<= 1; //multiply by 2 implemented as bitwise operation
}
if (thid == 0) { temp[n - 1] = 0; } // clear the last element
for (int d = 1; d < n; d *= 2) // traverse down tree & build scan
{
offset >>= 1;
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
float t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
g_odata[thid] = temp[thid];
}
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 1025;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
const int maxThreadPerBlock = 512;
const int numBlock = ARRAY_SIZE / maxThreadPerBlock + 1;
// generate the input array on the host
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare GPU memory pointers
float * d_in;
float * d_out;
// allocate GPU memory
hipMalloc((void**) &d_in, ARRAY_BYTES);
hipMalloc((void**) &d_out, ARRAY_BYTES);
// transfer the array to the GPU
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
// launch the kernel
//hillis_steele_scan_forward<<<numBlock, maxThreadPerBlock>>>(d_out, d_in, ARRAY_SIZE);
//hillis_steele_scan_backward<<<numBlock, maxThreadPerBlock>>>(d_out, d_in);
//shared_hillis_steele_scan_forward<<<numBlock, maxThreadPerBlock, maxThreadPerBlock * sizeof(float)>>>(d_out, d_in, ARRAY_SIZE);
//shared_hillis_steele_scan_backward<<<numBlock, maxThreadPerBlock, maxThreadPerBlock * sizeof(float)>>>(d_out, d_in, ARRAY_SIZE);
//blelloch_exclusive_scan<<<numBlock, maxThreadPerBlock, maxThreadPerBlock * sizeof(float)>>>(d_out, d_in, ARRAY_SIZE);
thrust::inclusive_scan(h_in, h_in + ARRAY_SIZE, h_out);
//copy back the result array to the CPU
//cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
// print out the resulting array
for (int i =0; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
hipFree(d_in);
hipFree(d_out);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z26hillis_steele_scan_forwardPfS_i
.globl _Z26hillis_steele_scan_forwardPfS_i
.p2align 8
.type _Z26hillis_steele_scan_forwardPfS_i,@function
_Z26hillis_steele_scan_forwardPfS_i:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x24
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b32 s0, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_cmp_lt_i32 s0, 2
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v2, 31, v1
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v4, vcc_lo, s6, v2
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v3, vcc_lo
v_add_co_u32 v2, vcc_lo, s4, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v3, vcc_lo
global_load_b32 v0, v[4:5], off
s_waitcnt vmcnt(0)
global_store_b32 v[2:3], v0, off
s_cbranch_scc1 .LBB0_5
s_mov_b32 s1, 1
s_mov_b32 s2, 0
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_3
.p2align 6
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s6
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s6, exec_lo, s3
s_or_b32 s2, s6, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s2
s_cbranch_execz .LBB0_5
.LBB0_3:
v_add_nc_u32_e32 v4, s1, v1
s_or_b32 s3, s3, exec_lo
s_mov_b32 s6, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s0, v4
s_cbranch_execz .LBB0_2
v_ashrrev_i32_e32 v5, 31, v4
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
global_load_b32 v0, v[2:3], off
v_lshlrev_b64 v[4:5], 2, v[4:5]
s_waitcnt vmcnt(0)
s_barrier
buffer_gl0_inv
s_lshl_b32 s1, s1, 1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_ge_i32 s1, s0
v_add_co_u32 v4, vcc_lo, s4, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v5, vcc_lo
s_cselect_b32 s7, -1, 0
s_and_not1_b32 s3, s3, exec_lo
s_and_b32 s7, s7, exec_lo
global_load_b32 v6, v[4:5], off
s_or_b32 s3, s3, s7
s_waitcnt vmcnt(0)
v_add_f32_e32 v0, v0, v6
global_store_b32 v[4:5], v0, off
s_branch .LBB0_2
.LBB0_5:
s_set_inst_prefetch_distance 0x2
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z26hillis_steele_scan_forwardPfS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z26hillis_steele_scan_forwardPfS_i, .Lfunc_end0-_Z26hillis_steele_scan_forwardPfS_i
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z27hillis_steele_scan_backwardPfS_
.globl _Z27hillis_steele_scan_backwardPfS_
.p2align 8
.type _Z27hillis_steele_scan_backwardPfS_,@function
_Z27hillis_steele_scan_backwardPfS_:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x1c
s_load_b128 s[0:3], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
v_add_co_u32 v4, vcc_lo, s2, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v5, vcc_lo, s3, v3, vcc_lo
v_add_co_u32 v2, vcc_lo, s0, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo
global_load_b32 v0, v[4:5], off
s_mov_b32 s2, 0
s_mov_b32 s3, exec_lo
s_waitcnt vmcnt(0)
global_store_b32 v[2:3], v0, off
v_cmpx_lt_i32_e32 0, v1
s_cbranch_execz .LBB1_5
v_mov_b32_e32 v5, 0
s_mov_b32 s3, 1
s_set_inst_prefetch_distance 0x1
s_branch .LBB1_3
.p2align 6
.LBB1_2:
s_or_b32 exec_lo, exec_lo, s5
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s5, exec_lo, s4
s_or_b32 s2, s5, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s2
s_cbranch_execz .LBB1_5
.LBB1_3:
v_subrev_nc_u32_e32 v4, s3, v1
s_or_b32 s4, s4, exec_lo
s_mov_b32 s5, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_lt_i32_e32 -1, v4
s_cbranch_execz .LBB1_2
v_lshlrev_b64 v[6:7], 2, v[4:5]
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
s_lshl_b32 s3, s3, 1
s_and_not1_b32 s4, s4, exec_lo
v_add_co_u32 v6, vcc_lo, s0, v6
v_add_co_ci_u32_e32 v7, vcc_lo, s1, v7, vcc_lo
v_cmp_gt_i32_e32 vcc_lo, s3, v1
global_load_b32 v0, v[6:7], off
s_waitcnt vmcnt(0)
s_barrier
buffer_gl0_inv
global_load_b32 v4, v[2:3], off
s_and_b32 s6, vcc_lo, exec_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 s4, s4, s6
s_waitcnt vmcnt(0)
v_add_f32_e32 v0, v0, v4
global_store_b32 v[2:3], v0, off
s_branch .LBB1_2
.LBB1_5:
s_set_inst_prefetch_distance 0x2
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z27hillis_steele_scan_backwardPfS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z27hillis_steele_scan_backwardPfS_, .Lfunc_end1-_Z27hillis_steele_scan_backwardPfS_
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z33shared_hillis_steele_scan_forwardPfS_i
.globl _Z33shared_hillis_steele_scan_forwardPfS_i
.p2align 8
.type _Z33shared_hillis_steele_scan_forwardPfS_i,@function
_Z33shared_hillis_steele_scan_forwardPfS_i:
s_load_b32 s2, s[0:1], 0x10
s_mov_b32 s3, exec_lo
s_waitcnt lgkmcnt(0)
v_cmpx_gt_i32_e64 s2, v0
s_cbranch_execz .LBB2_9
s_load_b64 s[4:5], s[0:1], 0x8
v_lshlrev_b32_e32 v1, 2, v0
s_mov_b32 s3, 0
s_cmp_lt_i32 s2, 2
s_waitcnt lgkmcnt(0)
global_load_b32 v2, v1, s[4:5]
v_add_nc_u32_e32 v1, 0, v1
s_mov_b32 s4, -1
s_waitcnt vmcnt(0)
ds_store_b32 v1, v2
s_cbranch_scc1 .LBB2_7
s_mov_b32 s5, 1
s_set_inst_prefetch_distance 0x1
s_branch .LBB2_4
.p2align 6
.LBB2_3:
s_or_b32 exec_lo, exec_lo, s8
s_xor_b32 s8, s6, -1
s_and_b32 s9, exec_lo, s7
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
s_or_b32 s3, s9, s3
s_and_not1_b32 s4, s4, exec_lo
s_and_b32 s8, s8, exec_lo
s_or_b32 s4, s4, s8
s_and_not1_b32 exec_lo, exec_lo, s3
s_cbranch_execz .LBB2_6
.LBB2_4:
v_add_nc_u32_e32 v2, s5, v0
s_or_b32 s6, s6, exec_lo
s_or_b32 s7, s7, exec_lo
s_mov_b32 s8, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s2, v2
s_cbranch_execz .LBB2_3
v_lshl_add_u32 v2, v2, 2, 0
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
ds_load_b32 v3, v1
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
ds_load_b32 v4, v2
s_lshl_b32 s5, s5, 1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_ge_i32 s5, s2
s_cselect_b32 s9, -1, 0
s_and_not1_b32 s7, s7, exec_lo
s_and_b32 s9, s9, exec_lo
s_and_not1_b32 s6, s6, exec_lo
s_or_b32 s7, s7, s9
s_waitcnt lgkmcnt(0)
v_add_f32_e32 v3, v3, v4
ds_store_b32 v2, v3
s_branch .LBB2_3
.LBB2_6:
s_set_inst_prefetch_distance 0x2
s_or_b32 exec_lo, exec_lo, s3
.LBB2_7:
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 exec_lo, exec_lo, s4
s_cbranch_execz .LBB2_9
s_load_b64 s[0:1], s[0:1], 0x0
ds_load_b32 v1, v1
v_lshlrev_b32_e32 v0, 2, v0
s_waitcnt lgkmcnt(0)
global_store_b32 v0, v1, s[0:1]
.LBB2_9:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z33shared_hillis_steele_scan_forwardPfS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 20
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 10
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z33shared_hillis_steele_scan_forwardPfS_i, .Lfunc_end2-_Z33shared_hillis_steele_scan_forwardPfS_i
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z34shared_hillis_steele_scan_backwardPfS_i
.globl _Z34shared_hillis_steele_scan_backwardPfS_i
.p2align 8
.type _Z34shared_hillis_steele_scan_backwardPfS_i,@function
_Z34shared_hillis_steele_scan_backwardPfS_i:
s_load_b32 s2, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
v_cmp_gt_i32_e32 vcc_lo, s2, v0
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB3_9
s_load_b64 s[2:3], s[0:1], 0x8
v_lshlrev_b32_e32 v1, 2, v0
s_mov_b32 s4, -1
s_waitcnt lgkmcnt(0)
global_load_b32 v2, v1, s[2:3]
v_add_nc_u32_e32 v1, 0, v1
s_mov_b32 s3, 0
s_mov_b32 s2, exec_lo
s_waitcnt vmcnt(0)
ds_store_b32 v1, v2
v_cmpx_ne_u32_e32 0, v0
s_cbranch_execz .LBB3_7
s_mov_b32 s4, 1
s_set_inst_prefetch_distance 0x1
s_branch .LBB3_4
.p2align 6
.LBB3_3:
s_or_b32 exec_lo, exec_lo, s8
s_xor_b32 s8, s6, -1
s_and_b32 s9, exec_lo, s7
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
s_or_b32 s3, s9, s3
s_and_not1_b32 s5, s5, exec_lo
s_and_b32 s8, s8, exec_lo
s_or_b32 s5, s5, s8
s_and_not1_b32 exec_lo, exec_lo, s3
s_cbranch_execz .LBB3_6
.LBB3_4:
v_subrev_nc_u32_e32 v2, s4, v0
s_or_b32 s6, s6, exec_lo
s_or_b32 s7, s7, exec_lo
s_mov_b32 s8, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_lt_i32_e32 -1, v2
s_cbranch_execz .LBB3_3
v_lshl_add_u32 v2, v2, 2, 0
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_lshl_b32 s4, s4, 1
ds_load_b32 v2, v2
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
ds_load_b32 v3, v1
v_cmp_gt_u32_e32 vcc_lo, s4, v0
s_and_not1_b32 s7, s7, exec_lo
s_and_not1_b32 s6, s6, exec_lo
s_and_b32 s9, vcc_lo, exec_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 s7, s7, s9
s_waitcnt lgkmcnt(0)
v_add_f32_e32 v2, v2, v3
ds_store_b32 v1, v2
s_branch .LBB3_3
.LBB3_6:
s_set_inst_prefetch_distance 0x2
s_or_b32 exec_lo, exec_lo, s3
s_delay_alu instid0(SALU_CYCLE_1)
s_or_not1_b32 s4, s5, exec_lo
.LBB3_7:
s_or_b32 exec_lo, exec_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 exec_lo, exec_lo, s4
s_cbranch_execz .LBB3_9
s_load_b64 s[0:1], s[0:1], 0x0
ds_load_b32 v1, v1
v_lshlrev_b32_e32 v0, 2, v0
s_waitcnt lgkmcnt(0)
global_store_b32 v0, v1, s[0:1]
.LBB3_9:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z34shared_hillis_steele_scan_backwardPfS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 20
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 10
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end3:
.size _Z34shared_hillis_steele_scan_backwardPfS_i, .Lfunc_end3-_Z34shared_hillis_steele_scan_backwardPfS_i
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z23blelloch_exclusive_scanPfS_i
.globl _Z23blelloch_exclusive_scanPfS_i
.p2align 8
.type _Z23blelloch_exclusive_scanPfS_i,@function
_Z23blelloch_exclusive_scanPfS_i:
s_clause 0x1
s_load_b64 s[4:5], s[0:1], 0x8
s_load_b32 s2, s[0:1], 0x10
v_lshlrev_b32_e32 v1, 2, v0
s_mov_b32 s3, 1
s_waitcnt lgkmcnt(0)
global_load_b32 v2, v1, s[4:5]
v_add_nc_u32_e32 v1, 0, v1
s_cmp_lt_i32 s2, 2
s_waitcnt vmcnt(0)
ds_store_b32 v1, v2
s_cbranch_scc1 .LBB4_6
v_lshlrev_b32_e32 v3, 1, v0
s_mov_b32 s4, s2
s_delay_alu instid0(VALU_DEP_1)
v_or_b32_e32 v2, 1, v3
v_add_nc_u32_e32 v3, 2, v3
.p2align 6
.LBB4_2:
s_lshr_b32 s5, s4, 1
s_mov_b32 s6, exec_lo
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
v_cmpx_gt_u32_e64 s5, v0
s_cbranch_execz .LBB4_4
v_mul_lo_u32 v4, s3, v2
v_mul_lo_u32 v5, s3, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b32_e32 v4, 2, v4
v_lshlrev_b32_e32 v5, 2, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add3_u32 v4, v4, 0, -4
v_add3_u32 v5, v5, 0, -4
ds_load_b32 v4, v4
ds_load_b32 v6, v5
s_waitcnt lgkmcnt(0)
v_add_f32_e32 v4, v4, v6
ds_store_b32 v5, v4
.LBB4_4:
s_or_b32 exec_lo, exec_lo, s6
s_lshl_b32 s3, s3, 1
s_cmp_lt_u32 s4, 4
s_cbranch_scc1 .LBB4_6
s_mov_b32 s4, s5
s_branch .LBB4_2
.LBB4_6:
s_mov_b32 s4, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB4_8
s_lshl_b32 s5, s2, 2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_add_i32 s5, s5, 0
s_add_i32 s5, s5, -4
s_delay_alu instid0(SALU_CYCLE_1)
v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v3, s5
ds_store_b32 v3, v2
.LBB4_8:
s_or_b32 exec_lo, exec_lo, s4
s_cmp_lt_i32 s2, 2
s_cbranch_scc1 .LBB4_13
v_lshlrev_b32_e32 v3, 1, v0
s_mov_b32 s4, 1
s_delay_alu instid0(VALU_DEP_1)
v_or_b32_e32 v2, 1, v3
v_add_nc_u32_e32 v3, 2, v3
s_set_inst_prefetch_distance 0x1
s_branch .LBB4_11
.p2align 6
.LBB4_10:
s_or_b32 exec_lo, exec_lo, s5
s_lshl_b32 s4, s4, 1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_ge_i32 s4, s2
s_cbranch_scc1 .LBB4_13
.LBB4_11:
s_ashr_i32 s3, s3, 1
s_mov_b32 s5, exec_lo
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
v_cmpx_gt_u32_e64 s4, v0
s_cbranch_execz .LBB4_10
v_mul_lo_u32 v4, s3, v3
v_mul_lo_u32 v6, s3, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b32_e32 v4, 2, v4
v_lshlrev_b32_e32 v6, 2, v6
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add3_u32 v4, v4, 0, -4
v_add3_u32 v6, v6, 0, -4
ds_load_b32 v5, v4
ds_load_b32 v7, v6
s_waitcnt lgkmcnt(1)
ds_store_b32 v6, v5
ds_load_b32 v5, v4
s_waitcnt lgkmcnt(0)
v_add_f32_e32 v5, v7, v5
ds_store_b32 v4, v5
s_branch .LBB4_10
.LBB4_13:
s_set_inst_prefetch_distance 0x2
s_load_b64 s[0:1], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
ds_load_b32 v1, v1
v_lshlrev_b32_e32 v0, 2, v0
s_waitcnt lgkmcnt(0)
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z23blelloch_exclusive_scanPfS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 20
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 7
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end4:
.size _Z23blelloch_exclusive_scanPfS_i, .Lfunc_end4-_Z23blelloch_exclusive_scanPfS_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z26hillis_steele_scan_forwardPfS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z26hillis_steele_scan_forwardPfS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z27hillis_steele_scan_backwardPfS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z27hillis_steele_scan_backwardPfS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 20
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z33shared_hillis_steele_scan_forwardPfS_i
.private_segment_fixed_size: 0
.sgpr_count: 10
.sgpr_spill_count: 0
.symbol: _Z33shared_hillis_steele_scan_forwardPfS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 20
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z34shared_hillis_steele_scan_backwardPfS_i
.private_segment_fixed_size: 0
.sgpr_count: 12
.sgpr_spill_count: 0
.symbol: _Z34shared_hillis_steele_scan_backwardPfS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 20
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z23blelloch_exclusive_scanPfS_i
.private_segment_fixed_size: 0
.sgpr_count: 7
.sgpr_spill_count: 0
.symbol: _Z23blelloch_exclusive_scanPfS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <iostream>
#include <numeric>
#include <stdlib.h>
#include <stdio.h>
#include<thrust/scan.h>
/*
Somethings so confuse me, why i can't get same correct result every time.
*/
/*
These two kernel could be used on large array, but slow
Best advice: use __syncthreads() before you want to use different index
*/
__global__ void hillis_steele_scan_forward(float * d_out, float * d_in, const int array_size){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
d_out[idx] = d_in[idx];
for(int step = 1; step < array_size; step *= 2){
if(idx + step >= array_size) return;
__syncthreads();
float in1 = d_out[idx];
__syncthreads();
d_out[idx + step] += in1;
}
}
__global__ void hillis_steele_scan_backward(float * d_out, float * d_in){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
d_out[idx] = d_in[idx];
for(int step = 1; step <= idx; step *= 2){
if(idx - step < 0) return;
__syncthreads();
float in1 = d_out[idx - step];
__syncthreads();
d_out[idx] += in1;
}
}
/*
These two kernel could be used on small array, but fast
*/
// Shared-memory version of the forward scan: stages the input in the
// dynamically sized shared buffer (size passed as the third launch argument),
// scans in place, then writes back to global memory.
// Single-block only: elements are indexed with threadIdx.x alone.
// NOTE(review): two defects here:
//   * Threads return early (idx >= array_size, or mid-loop when
//     idx + step >= array_size), so the __syncthreads() calls below are not
//     reached by the whole block — undefined behavior.
//   * A thread that returns from inside the loop never executes the final
//     `d_out[idx] = sdata[idx]`, so the tail elements of d_out (including the
//     last one) are never written.
__global__ void shared_hillis_steele_scan_forward(float *d_out, float *d_in, const int array_size) {
    extern __shared__ float sdata[];  // dynamic shared buffer; must hold >= array_size floats
    int idx = threadIdx.x;
    if(idx < array_size) {
        sdata[idx] = d_in[idx];
    } else {
        return;  // NOTE(review): exiting here makes every later barrier divergent
    } // the code below performs iterative scan on XY
    for(int step = 1; step < array_size; step *= 2){
        if(idx + step >= array_size) return;  // NOTE(review): also skips the write-back below
        __syncthreads();
        float in1 = sdata[idx];
        __syncthreads();
        sdata[idx + step] += in1;
    }
    d_out[idx] = sdata[idx];  // write back; only reached by threads that finished the loop
}
// Shared-memory version of the backward ("pull") scan. Single-block only:
// elements are indexed with threadIdx.x alone; shared buffer size is the
// third launch argument and must hold >= array_size floats.
// NOTE(review): same barrier defects as the other kernels — out-of-range
// threads return before the loop and in-range threads exit the loop at
// different iterations, so the __syncthreads() calls are divergent (UB).
__global__ void shared_hillis_steele_scan_backward(float * d_out, float * d_in, const int array_size){
    extern __shared__ float sdata[];
    int idx = threadIdx.x;
    if(idx < array_size) {
        sdata[idx] = d_in[idx];
    } else {
        return;  // NOTE(review): makes later barriers divergent
    }
    sdata[idx] = d_in[idx];  // NOTE(review): redundant — duplicates the staging store above
    for(int step = 1; step <= idx; step *= 2){
        if(idx - step < 0) return;  // dead check: loop condition already guarantees idx - step >= 0
        __syncthreads();
        float in1 = sdata[idx - step];
        __syncthreads();
        sdata[idx] += in1;
    }
    d_out[idx] = sdata[idx];  // only reached by threads that finished the loop
}
/*
This kernel produces a correct result only when the array size is a power of 2.
*/
/*
 * Work-efficient Blelloch exclusive scan entirely in shared memory.
 * Expected launch: a single block of n threads with n * sizeof(float)
 * dynamic shared memory. Preconditions (not checked): n is a power of two
 * and n == blockDim.x — with fewer threads than n, part of the input is
 * never staged; with more, extra threads index past the shared buffer.
 */
__global__ void blelloch_exclusive_scan(float *g_odata, float *g_idata, int n)
{
    extern __shared__ float temp[];// allocated on invocation
    int thid = threadIdx.x;
    int offset = 1;  // distance between the two elements combined at the current tree level
    temp[thid] = g_idata[thid];  // stage input; the barrier at the top of the first loop covers this store
    // Up-sweep (reduce) phase: each level halves the number of active threads.
    for (int d = n / 2; d > 0; d /= 2) // build sum in place up the tree
    {
        __syncthreads();
        if (thid < d)
        {
            int ai = offset*(2*thid+1)-1;  // left child of this thread's node
            int bi = offset*(2*thid+2)-1;  // right child / accumulation target
            temp[bi] += temp[ai];
        }
        offset <<= 1; //multiply by 2 implemented as bitwise operation
    }
    if (thid == 0) { temp[n - 1] = 0; } // clear the last element
    // Down-sweep phase: walk back down the tree, swapping left values and
    // accumulating, which turns the reduction tree into an exclusive prefix sum.
    for (int d = 1; d < n; d *= 2) // traverse down tree & build scan
    {
        offset >>= 1;
        __syncthreads();  // also orders the temp[n-1] = 0 store above before any read of it
        if (thid < d)
        {
            int ai = offset*(2*thid+1)-1;
            int bi = offset*(2*thid+2)-1;
            float t = temp[ai];
            temp[ai] = temp[bi];
            temp[bi] += t;
        }
    }
    __syncthreads();  // make the final shared-memory state visible before write-back
    g_odata[thid] = temp[thid];
}
// Demo driver: fills an array with 0..N-1 and prints its inclusive scan.
// The device kernels above are left commented out for experimentation; the
// scan that is actually printed comes from thrust::inclusive_scan running on
// host pointers. Fix over the original: every HIP runtime call is now
// checked instead of silently ignored.
int main(int argc, char ** argv) {
    (void)argc; (void)argv;
    const int ARRAY_SIZE = 1025;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
    const int maxThreadPerBlock = 512;
    const int numBlock = ARRAY_SIZE / maxThreadPerBlock + 1;  // grid size for the commented launches
    (void)maxThreadPerBlock; (void)numBlock;

    // Abort with a readable message if a HIP call fails.
    auto hip_check = [](hipError_t err, const char *what) {
        if (err != hipSuccess) {
            fprintf(stderr, "HIP error (%s): %s\n", what, hipGetErrorString(err));
            exit(EXIT_FAILURE);
        }
    };

    // generate the input array on the host
    float h_in[ARRAY_SIZE];
    for (int i = 0; i < ARRAY_SIZE; i++) {
        h_in[i] = float(i);
    }
    float h_out[ARRAY_SIZE];

    // allocate GPU memory and stage the input (only consumed by the
    // commented-out kernel launches below)
    float * d_in = nullptr;
    float * d_out = nullptr;
    hip_check(hipMalloc((void**) &d_in, ARRAY_BYTES), "hipMalloc d_in");
    hip_check(hipMalloc((void**) &d_out, ARRAY_BYTES), "hipMalloc d_out");
    hip_check(hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice), "hipMemcpy h_in -> d_in");

    // launch the kernel (alternatives kept for experimentation):
    //hillis_steele_scan_forward<<<numBlock, maxThreadPerBlock>>>(d_out, d_in, ARRAY_SIZE);
    //hillis_steele_scan_backward<<<numBlock, maxThreadPerBlock>>>(d_out, d_in);
    //shared_hillis_steele_scan_forward<<<numBlock, maxThreadPerBlock, maxThreadPerBlock * sizeof(float)>>>(d_out, d_in, ARRAY_SIZE);
    //shared_hillis_steele_scan_backward<<<numBlock, maxThreadPerBlock, maxThreadPerBlock * sizeof(float)>>>(d_out, d_in, ARRAY_SIZE);
    //blelloch_exclusive_scan<<<numBlock, maxThreadPerBlock, maxThreadPerBlock * sizeof(float)>>>(d_out, d_in, ARRAY_SIZE);

    // host-side scan over raw pointers; this is what gets printed
    thrust::inclusive_scan(h_in, h_in + ARRAY_SIZE, h_out);
    //copy back the result array to the CPU
    //hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);

    // print out the resulting array, four values per line
    for (int i = 0; i < ARRAY_SIZE; i++) {
        printf("%f", h_out[i]);
        printf(((i % 4) != 3) ? "\t" : "\n");
    }

    hip_check(hipFree(d_in), "hipFree d_in");
    hip_check(hipFree(d_out), "hipFree d_out");
    return 0;
}
.file "demo_scan.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z41__device_stub__hillis_steele_scan_forwardPfS_i # -- Begin function _Z41__device_stub__hillis_steele_scan_forwardPfS_i
.p2align 4, 0x90
.type _Z41__device_stub__hillis_steele_scan_forwardPfS_i,@function
_Z41__device_stub__hillis_steele_scan_forwardPfS_i: # @_Z41__device_stub__hillis_steele_scan_forwardPfS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z26hillis_steele_scan_forwardPfS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z41__device_stub__hillis_steele_scan_forwardPfS_i, .Lfunc_end0-_Z41__device_stub__hillis_steele_scan_forwardPfS_i
.cfi_endproc
# -- End function
.globl _Z42__device_stub__hillis_steele_scan_backwardPfS_ # -- Begin function _Z42__device_stub__hillis_steele_scan_backwardPfS_
.p2align 4, 0x90
.type _Z42__device_stub__hillis_steele_scan_backwardPfS_,@function
_Z42__device_stub__hillis_steele_scan_backwardPfS_: # @_Z42__device_stub__hillis_steele_scan_backwardPfS_
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z27hillis_steele_scan_backwardPfS_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end1:
.size _Z42__device_stub__hillis_steele_scan_backwardPfS_, .Lfunc_end1-_Z42__device_stub__hillis_steele_scan_backwardPfS_
.cfi_endproc
# -- End function
.globl _Z48__device_stub__shared_hillis_steele_scan_forwardPfS_i # -- Begin function _Z48__device_stub__shared_hillis_steele_scan_forwardPfS_i
.p2align 4, 0x90
.type _Z48__device_stub__shared_hillis_steele_scan_forwardPfS_i,@function
_Z48__device_stub__shared_hillis_steele_scan_forwardPfS_i: # @_Z48__device_stub__shared_hillis_steele_scan_forwardPfS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z33shared_hillis_steele_scan_forwardPfS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end2:
.size _Z48__device_stub__shared_hillis_steele_scan_forwardPfS_i, .Lfunc_end2-_Z48__device_stub__shared_hillis_steele_scan_forwardPfS_i
.cfi_endproc
# -- End function
.globl _Z49__device_stub__shared_hillis_steele_scan_backwardPfS_i # -- Begin function _Z49__device_stub__shared_hillis_steele_scan_backwardPfS_i
.p2align 4, 0x90
.type _Z49__device_stub__shared_hillis_steele_scan_backwardPfS_i,@function
_Z49__device_stub__shared_hillis_steele_scan_backwardPfS_i: # @_Z49__device_stub__shared_hillis_steele_scan_backwardPfS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z34shared_hillis_steele_scan_backwardPfS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end3:
.size _Z49__device_stub__shared_hillis_steele_scan_backwardPfS_i, .Lfunc_end3-_Z49__device_stub__shared_hillis_steele_scan_backwardPfS_i
.cfi_endproc
# -- End function
.globl _Z38__device_stub__blelloch_exclusive_scanPfS_i # -- Begin function _Z38__device_stub__blelloch_exclusive_scanPfS_i
.p2align 4, 0x90
.type _Z38__device_stub__blelloch_exclusive_scanPfS_i,@function
_Z38__device_stub__blelloch_exclusive_scanPfS_i: # @_Z38__device_stub__blelloch_exclusive_scanPfS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z23blelloch_exclusive_scanPfS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end4:
.size _Z38__device_stub__blelloch_exclusive_scanPfS_i, .Lfunc_end4-_Z38__device_stub__blelloch_exclusive_scanPfS_i
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $8232, %rsp # imm = 0x2028
.cfi_def_cfa_offset 8256
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
xorl %eax, %eax
.p2align 4, 0x90
.LBB5_1: # =>This Inner Loop Header: Depth=1
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, 16(%rsp,%rax,4)
incq %rax
cmpq $1025, %rax # imm = 0x401
jne .LBB5_1
# %bb.2:
movq %rsp, %rdi
movl $4100, %esi # imm = 0x1004
callq hipMalloc
leaq 8(%rsp), %rdi
movl $4100, %esi # imm = 0x1004
callq hipMalloc
movq (%rsp), %rdi
leaq 16(%rsp), %rsi
movl $4100, %edx # imm = 0x1004
movl $1, %ecx
callq hipMemcpy
movss 16(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss %xmm0, 4128(%rsp)
movl $4, %eax
.p2align 4, 0x90
.LBB5_3: # %.lr.ph.i.i.i.i.i
# =>This Inner Loop Header: Depth=1
addss 16(%rsp,%rax), %xmm0
movss %xmm0, 4128(%rsp,%rax)
addq $4, %rax
cmpq $4100, %rax # imm = 0x1004
jne .LBB5_3
# %bb.4: # %_ZN6thrust14inclusive_scanIPfS1_EET0_T_S3_S2_.exit.preheader
movl $.L.str.2, %ebx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB5_5: # %_ZN6thrust14inclusive_scanIPfS1_EET0_T_S3_S2_.exit
# =>This Inner Loop Header: Depth=1
movss 4128(%rsp,%r14,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %edi
movb $1, %al
callq printf
movl %r14d, %eax
notl %eax
testb $3, %al
movl $.L.str.1, %edi
cmoveq %rbx, %rdi
xorl %eax, %eax
callq printf
incq %r14
cmpq $1025, %r14 # imm = 0x401
jne .LBB5_5
# %bb.6:
movq (%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $8232, %rsp # imm = 0x2028
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.Lfunc_end5:
.size main, .Lfunc_end5-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB6_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB6_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z26hillis_steele_scan_forwardPfS_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z27hillis_steele_scan_backwardPfS_, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z33shared_hillis_steele_scan_forwardPfS_i, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z34shared_hillis_steele_scan_backwardPfS_i, %esi
movl $.L__unnamed_4, %edx
movl $.L__unnamed_4, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z23blelloch_exclusive_scanPfS_i, %esi
movl $.L__unnamed_5, %edx
movl $.L__unnamed_5, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end6:
.size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB7_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB7_2:
retq
.Lfunc_end7:
.size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z26hillis_steele_scan_forwardPfS_i,@object # @_Z26hillis_steele_scan_forwardPfS_i
.section .rodata,"a",@progbits
.globl _Z26hillis_steele_scan_forwardPfS_i
.p2align 3, 0x0
_Z26hillis_steele_scan_forwardPfS_i:
.quad _Z41__device_stub__hillis_steele_scan_forwardPfS_i
.size _Z26hillis_steele_scan_forwardPfS_i, 8
.type _Z27hillis_steele_scan_backwardPfS_,@object # @_Z27hillis_steele_scan_backwardPfS_
.globl _Z27hillis_steele_scan_backwardPfS_
.p2align 3, 0x0
_Z27hillis_steele_scan_backwardPfS_:
.quad _Z42__device_stub__hillis_steele_scan_backwardPfS_
.size _Z27hillis_steele_scan_backwardPfS_, 8
.type _Z33shared_hillis_steele_scan_forwardPfS_i,@object # @_Z33shared_hillis_steele_scan_forwardPfS_i
.globl _Z33shared_hillis_steele_scan_forwardPfS_i
.p2align 3, 0x0
_Z33shared_hillis_steele_scan_forwardPfS_i:
.quad _Z48__device_stub__shared_hillis_steele_scan_forwardPfS_i
.size _Z33shared_hillis_steele_scan_forwardPfS_i, 8
.type _Z34shared_hillis_steele_scan_backwardPfS_i,@object # @_Z34shared_hillis_steele_scan_backwardPfS_i
.globl _Z34shared_hillis_steele_scan_backwardPfS_i
.p2align 3, 0x0
_Z34shared_hillis_steele_scan_backwardPfS_i:
.quad _Z49__device_stub__shared_hillis_steele_scan_backwardPfS_i
.size _Z34shared_hillis_steele_scan_backwardPfS_i, 8
.type _Z23blelloch_exclusive_scanPfS_i,@object # @_Z23blelloch_exclusive_scanPfS_i
.globl _Z23blelloch_exclusive_scanPfS_i
.p2align 3, 0x0
_Z23blelloch_exclusive_scanPfS_i:
.quad _Z38__device_stub__blelloch_exclusive_scanPfS_i
.size _Z23blelloch_exclusive_scanPfS_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%f"
.size .L.str, 3
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "\t"
.size .L.str.1, 2
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "\n"
.size .L.str.2, 2
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z26hillis_steele_scan_forwardPfS_i"
.size .L__unnamed_1, 36
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z27hillis_steele_scan_backwardPfS_"
.size .L__unnamed_2, 36
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z33shared_hillis_steele_scan_forwardPfS_i"
.size .L__unnamed_3, 43
.type .L__unnamed_4,@object # @3
.L__unnamed_4:
.asciz "_Z34shared_hillis_steele_scan_backwardPfS_i"
.size .L__unnamed_4, 44
.type .L__unnamed_5,@object # @4
.L__unnamed_5:
.asciz "_Z23blelloch_exclusive_scanPfS_i"
.size .L__unnamed_5, 33
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z41__device_stub__hillis_steele_scan_forwardPfS_i
.addrsig_sym _Z42__device_stub__hillis_steele_scan_backwardPfS_
.addrsig_sym _Z48__device_stub__shared_hillis_steele_scan_forwardPfS_i
.addrsig_sym _Z49__device_stub__shared_hillis_steele_scan_backwardPfS_i
.addrsig_sym _Z38__device_stub__blelloch_exclusive_scanPfS_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z26hillis_steele_scan_forwardPfS_i
.addrsig_sym _Z27hillis_steele_scan_backwardPfS_
.addrsig_sym _Z33shared_hillis_steele_scan_forwardPfS_i
.addrsig_sym _Z34shared_hillis_steele_scan_backwardPfS_i
.addrsig_sym _Z23blelloch_exclusive_scanPfS_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include <random>
#include <cmath>
#include <chrono>
#define ARRAY_SIZE 10000000
#define BLOCK_SIZE 256
#ifndef ARRAY_SIZE
#define ARRAY_SIZE 10000000  // fallback for isolated builds; matches the host definition
#endif
// GPU SAXPY: y[i] = a * x[i] + y[i], one element per thread.
// Fix: the host rounds the grid up to a multiple of BLOCK_SIZE
// (39063 * 256 = 10,000,128 threads for 10,000,000 elements), so the tail
// threads used to read/write past the end of both allocations. Guard them.
__global__ void device_saxpy(float* x, float* y, const float a)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < ARRAY_SIZE) {
        y[i] = a * x[i] + y[i];
    }
}
#ifndef ARRAY_SIZE
#define ARRAY_SIZE 10000000  // matches the definition at the top of this file
#endif
// CPU reference SAXPY over the fixed-size arrays: y <- a * x + y, in place.
// main() uses the result as the golden value for the GPU comparison.
void host_saxpy(float x[], float y[], const float a)
{
    const float* xp = x;
    float* yp = y;
    float* const yend = y + ARRAY_SIZE;
    while (yp != yend) {
        *yp = a * *xp + *yp;
        ++xp;
        ++yp;
    }
}
// Benchmarks SAXPY on GPU vs CPU over 10M random floats and compares results.
// Fixes over the original: host/CUDA allocation and copy results are checked,
// launch errors are surfaced via cudaGetLastError(), and the float-difference
// test uses std::fabs (a bare abs() can bind to the int overload on some
// toolchains, truncating the difference to 0).
int main()
{
    // Abort with a readable message on any CUDA runtime failure.
    auto check = [](cudaError_t err, const char* what) {
        if (err != cudaSuccess) {
            fprintf(stderr, "CUDA error (%s): %s\n", what, cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
    };

    // ============= SET UP ARRAYS ============== //
    std::default_random_engine rdmGen;  // default-seeded: runs are reproducible
    std::uniform_real_distribution<float> dist(0.0f, 5.0f);
    const float a = 1.0f;
    float* x = (float*)malloc(ARRAY_SIZE * sizeof(float));
    float* y = (float*)malloc(ARRAY_SIZE * sizeof(float));
    if (x == NULL || y == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    for (int i = 0; i < ARRAY_SIZE; i++) {
        x[i] = dist(rdmGen);
        y[i] = dist(rdmGen);
    }

    // ============= START COMPUTING ON DEVICE ============== //
    printf("Computing SAXPY on the GPU... ");
    // The timed section deliberately includes device allocation and both
    // host->device copies, matching the original end-to-end measurement.
    auto start = std::chrono::system_clock::now();
    float* d_x = 0;
    float* d_y = 0;
    check(cudaMalloc(&d_x, ARRAY_SIZE * sizeof(float)), "cudaMalloc d_x");
    check(cudaMalloc(&d_y, ARRAY_SIZE * sizeof(float)), "cudaMalloc d_y");
    check(cudaMemcpy(d_x, x, ARRAY_SIZE * sizeof(float), cudaMemcpyHostToDevice), "copy x");
    check(cudaMemcpy(d_y, y, ARRAY_SIZE * sizeof(float), cudaMemcpyHostToDevice), "copy y");
    device_saxpy<<<(ARRAY_SIZE + BLOCK_SIZE - 1) / BLOCK_SIZE,
                   BLOCK_SIZE>>>(d_x, d_y, a);
    check(cudaGetLastError(), "device_saxpy launch");      // bad-config errors
    check(cudaDeviceSynchronize(), "device_saxpy sync");   // async execution errors
    auto end = std::chrono::system_clock::now();
    std::chrono::duration<double> gpu_path_ms = (end - start) * 1000;
    printf("Done in %f ms!\n\n", gpu_path_ms.count());

    // ============= START COMPUTING ON HOST ============== //
    printf("Computing SAXPY on the CPU... ");
    start = std::chrono::system_clock::now();
    host_saxpy(x, y, a);  // overwrites y with the CPU reference result
    end = std::chrono::system_clock::now();
    std::chrono::duration<double> cpu_path_ms = (end - start) * 1000;
    printf("Done in %f ms!\n\n", cpu_path_ms.count());

    // ============= COMPARE OUTPUTS ============== //
    // Fetch the device result into x; its original contents are no longer needed.
    check(cudaMemcpy(x, d_y, ARRAY_SIZE * sizeof(float), cudaMemcpyDeviceToHost), "copy result");
    printf("Comparing the output for each implementation... ");
    bool correct = true;
    for (int i = 0; i < ARRAY_SIZE; i++) {
        if (std::fabs(x[i] - y[i]) > 0.0001) { // x is device result, y is host result
            correct = false;
            break;
        }
    }
    if (correct) printf("Correct!\n");
    else printf("Incorrect!\n");

    // ============= FREE RESOURCES ============== //
    free(y);
    free(x);
    check(cudaFree(d_y), "cudaFree d_y");
    check(cudaFree(d_x), "cudaFree d_x");
    return 0;
}
Function : _Z12device_saxpyPfS_f
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R4, R4, c[0x0][0x0], R3 ; /* 0x0000000004047a24 */
/* 0x001fca00078e0203 */
/*0060*/ IMAD.WIDE R2, R4, R5, c[0x0][0x160] ; /* 0x0000580004027625 */
/* 0x000fc800078e0205 */
/*0070*/ IMAD.WIDE R4, R4, R5, c[0x0][0x168] ; /* 0x00005a0004047625 */
/* 0x000fe400078e0205 */
/*0080*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0090*/ LDG.E R7, [R4.64] ; /* 0x0000000404077981 */
/* 0x000ea4000c1e1900 */
/*00a0*/ FFMA R7, R2, c[0x0][0x170], R7 ; /* 0x00005c0002077a23 */
/* 0x004fca0000000007 */
/*00b0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*00c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00d0*/ BRA 0xd0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <random>
#include <cmath>
#include <chrono>
#define ARRAY_SIZE 10000000
#define BLOCK_SIZE 256
#ifndef ARRAY_SIZE
#define ARRAY_SIZE 10000000  // fallback for isolated builds; matches the host definition
#endif
// GPU SAXPY: y[i] = a * x[i] + y[i], one element per thread.
// Fix: the host rounds the grid up to a multiple of BLOCK_SIZE
// (39063 * 256 = 10,000,128 threads for 10,000,000 elements), so the tail
// threads used to read/write past the end of both allocations. Guard them.
__global__ void device_saxpy(float* x, float* y, const float a)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < ARRAY_SIZE) {
        y[i] = a * x[i] + y[i];
    }
}
// CPU reference SAXPY: y[i] = a * x[i] + y[i] for all ARRAY_SIZE elements,
// updating y in place. main() compares the GPU output against this result.
void host_saxpy(float x[], float y[], const float a)
{
    for(int i = 0; i < ARRAY_SIZE; i++) {
        y[i] = a * x[i] + y[i];
    }
}
// Benchmarks SAXPY on GPU vs CPU over 10M random floats, then compares the
// two results element-wise with an absolute tolerance.
int main()
{
    // ============= SET UP ARRAYS ============== //
    std::default_random_engine rdmGen;  // default-seeded: runs are reproducible
    std::uniform_real_distribution<float> dist(0.0, 5.0);
    const float a = 1.0;
    // NOTE(review): malloc results are unchecked; a NULL would crash in the fill loop.
    float* x = (float*)malloc(ARRAY_SIZE * sizeof(float));
    float* y = (float*)malloc(ARRAY_SIZE * sizeof(float));
    for (int i = 0; i < ARRAY_SIZE; i++) {
        x[i] = dist(rdmGen);
        y[i] = dist(rdmGen);
    }
    // ============= START COMPUTING ON DEVICE ============== //
    printf("Computing SAXPY on the GPU... ");
    // Create, allocate and copy array to device
    // The timed section includes cudaMalloc and both host->device copies,
    // so "GPU time" is end-to-end rather than kernel-only.
    auto start = std::chrono::system_clock::now();
    float* d_x = 0;
    float* d_y = 0;
    // NOTE(review): all CUDA return codes below are ignored; a failure would
    // only surface as "Incorrect!" at the end.
    cudaMalloc(&d_x, ARRAY_SIZE * sizeof(float));
    cudaMalloc(&d_y, ARRAY_SIZE * sizeof(float));
    cudaMemcpy(d_x, x, ARRAY_SIZE * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, y, ARRAY_SIZE * sizeof(float), cudaMemcpyHostToDevice);
    // Ceil-divided grid: 39063 blocks * 256 threads = 10,000,128 threads for
    // 10,000,000 elements. NOTE(review): device_saxpy has no bounds check, so
    // the 128 tail threads access past the end of both allocations.
    device_saxpy<<<(ARRAY_SIZE + BLOCK_SIZE - 1) / BLOCK_SIZE,
                   BLOCK_SIZE>>>(d_x, d_y, a);
    cudaDeviceSynchronize();  // NOTE(review): no cudaGetLastError() after the launch
    auto end = std::chrono::system_clock::now();
    std::chrono::duration<double> host_time = (end-start) * 1000;  // despite the name, this times the GPU path
    printf("Done in %f ms!\n\n", host_time.count());
    // ============= START COMPUTING ON HOST ============== //
    printf("Computing SAXPY on the CPU... ");
    start = std::chrono::system_clock::now();
    host_saxpy(x, y, a);  // overwrites y with the CPU reference result
    end = std::chrono::system_clock::now();
    std::chrono::duration<double> device_time = (end-start) * 1000;  // despite the name, this times the CPU path
    printf("Done in %f ms!\n\n", device_time.count());
    // ============= COMPARE OUTPUTS ============== //
    // Get results from device and store in d_res
    // x is reused as the destination for the device result; its original
    // contents are no longer needed after host_saxpy ran.
    cudaMemcpy(x, d_y, ARRAY_SIZE * sizeof(float), cudaMemcpyDeviceToHost);
    printf("Comparing the output for each implementation... ");
    bool correct = true;
    for(int i = 0; i < ARRAY_SIZE; i++) {
        // NOTE(review): with only <cmath> included, unqualified abs may bind to
        // the int overload on some toolchains, truncating the difference —
        // std::fabs would be unambiguous.
        if(abs(x[i] - y[i]) > 0.0001) { // x is device result, y is host result
            correct = false;
            break;
        }
    }
    if(correct) printf("Correct!\n");
    else printf("Incorrect!\n");
    // ============= FREE RESOURCES ============== //
    free(y);
    free(x);
    cudaFree(d_y);
    cudaFree(d_x);
    return 0;
}
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB4248:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4248:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z10host_saxpyPfS_f
.type _Z10host_saxpyPfS_f, @function
_Z10host_saxpyPfS_f:
.LFB4240:
.cfi_startproc
endbr64
movl $0, %eax
.L4:
movaps %xmm0, %xmm1
mulss (%rdi,%rax), %xmm1
addss (%rsi,%rax), %xmm1
movss %xmm1, (%rsi,%rax)
addq $4, %rax
cmpq $40000000, %rax
jne .L4
ret
.cfi_endproc
.LFE4240:
.size _Z10host_saxpyPfS_f, .-_Z10host_saxpyPfS_f
.globl _Z35__device_stub__Z12device_saxpyPfS_fPfS_f
.type _Z35__device_stub__Z12device_saxpyPfS_fPfS_f, @function
_Z35__device_stub__Z12device_saxpyPfS_fPfS_f:
.LFB4270:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movss %xmm0, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L10
.L6:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L11
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L10:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z12device_saxpyPfS_f(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L6
.L11:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4270:
.size _Z35__device_stub__Z12device_saxpyPfS_fPfS_f, .-_Z35__device_stub__Z12device_saxpyPfS_fPfS_f
.globl _Z12device_saxpyPfS_f
.type _Z12device_saxpyPfS_f, @function
_Z12device_saxpyPfS_f:
.LFB4271:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z12device_saxpyPfS_fPfS_f
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4271:
.size _Z12device_saxpyPfS_f, .-_Z12device_saxpyPfS_f
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z12device_saxpyPfS_f"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB4273:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z12device_saxpyPfS_f(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4273:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .text._ZSt18generate_canonicalIfLm24ESt26linear_congruential_engineImLm16807ELm0ELm2147483647EEET_RT1_,"axG",@progbits,_ZSt18generate_canonicalIfLm24ESt26linear_congruential_engineImLm16807ELm0ELm2147483647EEET_RT1_,comdat
.weak _ZSt18generate_canonicalIfLm24ESt26linear_congruential_engineImLm16807ELm0ELm2147483647EEET_RT1_
.type _ZSt18generate_canonicalIfLm24ESt26linear_congruential_engineImLm16807ELm0ELm2147483647EEET_RT1_, @function
_ZSt18generate_canonicalIfLm24ESt26linear_congruential_engineImLm16807ELm0ELm2147483647EEET_RT1_:
.LFB4958:
.cfi_startproc
endbr64
imulq $16807, (%rdi), %rcx
movabsq $8589934597, %rdx
movq %rcx, %rax
mulq %rdx
movq %rcx, %rax
subq %rdx, %rax
shrq %rax
addq %rax, %rdx
shrq $30, %rdx
movq %rdx, %rax
salq $31, %rax
subq %rdx, %rax
subq %rax, %rcx
movq %rcx, %rdx
movq %rcx, (%rdi)
subq $1, %rdx
js .L17
pxor %xmm0, %xmm0
cvtsi2ssq %rdx, %xmm0
.L18:
pxor %xmm1, %xmm1
addss %xmm1, %xmm0
mulss .LC3(%rip), %xmm0
comiss .LC4(%rip), %xmm0
jnb .L21
.L16:
ret
.L17:
movq %rdx, %rax
shrq %rax
andl $1, %edx
orq %rdx, %rax
pxor %xmm0, %xmm0
cvtsi2ssq %rax, %xmm0
addss %xmm0, %xmm0
jmp .L18
.L21:
movss .LC1(%rip), %xmm0
ret
.cfi_endproc
.LFE4958:
.size _ZSt18generate_canonicalIfLm24ESt26linear_congruential_engineImLm16807ELm0ELm2147483647EEET_RT1_, .-_ZSt18generate_canonicalIfLm24ESt26linear_congruential_engineImLm16807ELm0ELm2147483647EEET_RT1_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC6:
.string "Computing SAXPY on the GPU... "
.section .rodata.str1.1
.LC8:
.string "Done in %f ms!\n\n"
.section .rodata.str1.8
.align 8
.LC9:
.string "Computing SAXPY on the CPU... "
.align 8
.LC10:
.string "Comparing the output for each implementation... "
.section .rodata.str1.1
.LC13:
.string "Correct!\n"
.LC14:
.string "Incorrect!\n"
.text
.globl main
.type main, @function
main:
.LFB4241:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $72, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movq $1, 8(%rsp)
movl $40000000, %edi
call malloc@PLT
movq %rax, %rbp
movl $40000000, %edi
call malloc@PLT
movq %rax, %r12
movl $0, %ebx
.L23:
leaq 8(%rsp), %r13
movq %r13, %rdi
call _ZSt18generate_canonicalIfLm24ESt26linear_congruential_engineImLm16807ELm0ELm2147483647EEET_RT1_
mulss .LC5(%rip), %xmm0
pxor %xmm3, %xmm3
addss %xmm3, %xmm0
movss %xmm0, 0(%rbp,%rbx)
movq %r13, %rdi
call _ZSt18generate_canonicalIfLm24ESt26linear_congruential_engineImLm16807ELm0ELm2147483647EEET_RT1_
mulss .LC5(%rip), %xmm0
pxor %xmm4, %xmm4
addss %xmm4, %xmm0
movss %xmm0, (%r12,%rbx)
addq $4, %rbx
cmpq $40000000, %rbx
jne .L23
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
call _ZNSt6chrono3_V212system_clock3nowEv@PLT
movq %rax, %rbx
movq $0, 16(%rsp)
movq $0, 24(%rsp)
leaq 16(%rsp), %rdi
movl $40000000, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $40000000, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $40000000, %edx
movq %rbp, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $40000000, %edx
movq %r12, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $256, 44(%rsp)
movl $1, 48(%rsp)
movl $39063, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L35
.L24:
call cudaDeviceSynchronize@PLT
call _ZNSt6chrono3_V212system_clock3nowEv@PLT
subq %rbx, %rax
imulq $1000, %rax, %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
divsd .LC7(%rip), %xmm0
leaq .LC8(%rip), %rbx
movq %rbx, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
call _ZNSt6chrono3_V212system_clock3nowEv@PLT
movq %rax, %r13
movss .LC4(%rip), %xmm0
movq %r12, %rsi
movq %rbp, %rdi
call _Z10host_saxpyPfS_f
call _ZNSt6chrono3_V212system_clock3nowEv@PLT
subq %r13, %rax
imulq $1000, %rax, %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
divsd .LC7(%rip), %xmm0
movq %rbx, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl $2, %ecx
movl $40000000, %edx
movq 24(%rsp), %rsi
movq %rbp, %rdi
call cudaMemcpy@PLT
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %eax
movss .LC11(%rip), %xmm2
movsd .LC12(%rip), %xmm1
.L26:
movss 0(%rbp,%rax), %xmm0
subss (%r12,%rax), %xmm0
andps %xmm2, %xmm0
cvtss2sd %xmm0, %xmm0
comisd %xmm1, %xmm0
ja .L25
addq $4, %rax
cmpq $40000000, %rax
jne .L26
leaq .LC13(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L28:
movq %r12, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L36
movl $0, %eax
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L35:
.cfi_restore_state
movss .LC4(%rip), %xmm0
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z35__device_stub__Z12device_saxpyPfS_fPfS_f
jmp .L24
.L25:
leaq .LC14(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L28
.L36:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4241:
.size main, .-main
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC1:
.long 1065353215
.align 4
.LC3:
.long 805306368
.align 4
.LC4:
.long 1065353216
.align 4
.LC5:
.long 1084227584
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC7:
.long 0
.long 1104006501
.section .rodata.cst16,"aM",@progbits,16
.align 16
.LC11:
.long 2147483647
.long 0
.long 0
.long 0
.section .rodata.cst8
.align 8
.LC12:
.long -350469331
.long 1058682594
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <random>
#include <cmath>
#include <chrono>
#define ARRAY_SIZE 10000000
#define BLOCK_SIZE 256
__global__ void device_saxpy(float* x, float* y, const float a)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
y[i] = a * x[i] + y[i];
}
void host_saxpy(float x[], float y[], const float a)
{
for(int i = 0; i < ARRAY_SIZE; i++) {
y[i] = a * x[i] + y[i];
}
}
int main()
{
// ============= SET UP ARRAYS ============== //
std::default_random_engine rdmGen;
std::uniform_real_distribution<float> dist(0.0, 5.0);
const float a = 1.0;
float* x = (float*)malloc(ARRAY_SIZE * sizeof(float));
float* y = (float*)malloc(ARRAY_SIZE * sizeof(float));
for (int i = 0; i < ARRAY_SIZE; i++) {
x[i] = dist(rdmGen);
y[i] = dist(rdmGen);
}
// ============= START COMPUTING ON DEVICE ============== //
printf("Computing SAXPY on the GPU... ");
// Create, allocate and copy array to device
auto start = std::chrono::system_clock::now();
float* d_x = 0;
float* d_y = 0;
cudaMalloc(&d_x, ARRAY_SIZE * sizeof(float));
cudaMalloc(&d_y, ARRAY_SIZE * sizeof(float));
cudaMemcpy(d_x, x, ARRAY_SIZE * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, ARRAY_SIZE * sizeof(float), cudaMemcpyHostToDevice);
device_saxpy<<<(ARRAY_SIZE + BLOCK_SIZE - 1) / BLOCK_SIZE,
BLOCK_SIZE>>>(d_x, d_y, a);
cudaDeviceSynchronize();
auto end = std::chrono::system_clock::now();
std::chrono::duration<double> host_time = (end-start) * 1000;
printf("Done in %f ms!\n\n", host_time.count());
// ============= START COMPUTING ON HOST ============== //
printf("Computing SAXPY on the CPU... ");
start = std::chrono::system_clock::now();
host_saxpy(x, y, a);
end = std::chrono::system_clock::now();
std::chrono::duration<double> device_time = (end-start) * 1000;
printf("Done in %f ms!\n\n", device_time.count());
// ============= COMPARE OUTPUTS ============== //
// Get results from device and store in d_res
cudaMemcpy(x, d_y, ARRAY_SIZE * sizeof(float), cudaMemcpyDeviceToHost);
printf("Comparing the output for each implementation... ");
bool correct = true;
for(int i = 0; i < ARRAY_SIZE; i++) {
if(abs(x[i] - y[i]) > 0.0001) { // x is device result, y is host result
correct = false;
break;
}
}
if(correct) printf("Correct!\n");
else printf("Incorrect!\n");
// ============= FREE RESOURCES ============== //
free(y);
free(x);
cudaFree(d_y);
cudaFree(d_x);
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <random>
#include <cmath>
#include <chrono>
#define ARRAY_SIZE 10000000
#define BLOCK_SIZE 256
__global__ void device_saxpy(float* x, float* y, const float a)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
y[i] = a * x[i] + y[i];
}
void host_saxpy(float x[], float y[], const float a)
{
for(int i = 0; i < ARRAY_SIZE; i++) {
y[i] = a * x[i] + y[i];
}
}
int main()
{
// ============= SET UP ARRAYS ============== //
std::default_random_engine rdmGen;
std::uniform_real_distribution<float> dist(0.0, 5.0);
const float a = 1.0;
float* x = (float*)malloc(ARRAY_SIZE * sizeof(float));
float* y = (float*)malloc(ARRAY_SIZE * sizeof(float));
for (int i = 0; i < ARRAY_SIZE; i++) {
x[i] = dist(rdmGen);
y[i] = dist(rdmGen);
}
// ============= START COMPUTING ON DEVICE ============== //
printf("Computing SAXPY on the GPU... ");
// Create, allocate and copy array to device
auto start = std::chrono::system_clock::now();
float* d_x = 0;
float* d_y = 0;
hipMalloc(&d_x, ARRAY_SIZE * sizeof(float));
hipMalloc(&d_y, ARRAY_SIZE * sizeof(float));
hipMemcpy(d_x, x, ARRAY_SIZE * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_y, y, ARRAY_SIZE * sizeof(float), hipMemcpyHostToDevice);
device_saxpy<<<(ARRAY_SIZE + BLOCK_SIZE - 1) / BLOCK_SIZE,
BLOCK_SIZE>>>(d_x, d_y, a);
hipDeviceSynchronize();
auto end = std::chrono::system_clock::now();
std::chrono::duration<double> host_time = (end-start) * 1000;
printf("Done in %f ms!\n\n", host_time.count());
// ============= START COMPUTING ON HOST ============== //
printf("Computing SAXPY on the CPU... ");
start = std::chrono::system_clock::now();
host_saxpy(x, y, a);
end = std::chrono::system_clock::now();
std::chrono::duration<double> device_time = (end-start) * 1000;
printf("Done in %f ms!\n\n", device_time.count());
// ============= COMPARE OUTPUTS ============== //
// Get results from device and store in d_res
hipMemcpy(x, d_y, ARRAY_SIZE * sizeof(float), hipMemcpyDeviceToHost);
printf("Comparing the output for each implementation... ");
bool correct = true;
for(int i = 0; i < ARRAY_SIZE; i++) {
if(abs(x[i] - y[i]) > 0.0001) { // x is device result, y is host result
correct = false;
break;
}
}
if(correct) printf("Correct!\n");
else printf("Incorrect!\n");
// ============= FREE RESOURCES ============== //
free(y);
free(x);
hipFree(d_y);
hipFree(d_x);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <random>
#include <cmath>
#include <chrono>
#define ARRAY_SIZE 10000000
#define BLOCK_SIZE 256
__global__ void device_saxpy(float* x, float* y, const float a)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
y[i] = a * x[i] + y[i];
}
void host_saxpy(float x[], float y[], const float a)
{
for(int i = 0; i < ARRAY_SIZE; i++) {
y[i] = a * x[i] + y[i];
}
}
int main()
{
// ============= SET UP ARRAYS ============== //
std::default_random_engine rdmGen;
std::uniform_real_distribution<float> dist(0.0, 5.0);
const float a = 1.0;
float* x = (float*)malloc(ARRAY_SIZE * sizeof(float));
float* y = (float*)malloc(ARRAY_SIZE * sizeof(float));
for (int i = 0; i < ARRAY_SIZE; i++) {
x[i] = dist(rdmGen);
y[i] = dist(rdmGen);
}
// ============= START COMPUTING ON DEVICE ============== //
printf("Computing SAXPY on the GPU... ");
// Create, allocate and copy array to device
auto start = std::chrono::system_clock::now();
float* d_x = 0;
float* d_y = 0;
hipMalloc(&d_x, ARRAY_SIZE * sizeof(float));
hipMalloc(&d_y, ARRAY_SIZE * sizeof(float));
hipMemcpy(d_x, x, ARRAY_SIZE * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_y, y, ARRAY_SIZE * sizeof(float), hipMemcpyHostToDevice);
device_saxpy<<<(ARRAY_SIZE + BLOCK_SIZE - 1) / BLOCK_SIZE,
BLOCK_SIZE>>>(d_x, d_y, a);
hipDeviceSynchronize();
auto end = std::chrono::system_clock::now();
std::chrono::duration<double> host_time = (end-start) * 1000;
printf("Done in %f ms!\n\n", host_time.count());
// ============= START COMPUTING ON HOST ============== //
printf("Computing SAXPY on the CPU... ");
start = std::chrono::system_clock::now();
host_saxpy(x, y, a);
end = std::chrono::system_clock::now();
std::chrono::duration<double> device_time = (end-start) * 1000;
printf("Done in %f ms!\n\n", device_time.count());
// ============= COMPARE OUTPUTS ============== //
// Get results from device and store in d_res
hipMemcpy(x, d_y, ARRAY_SIZE * sizeof(float), hipMemcpyDeviceToHost);
printf("Comparing the output for each implementation... ");
bool correct = true;
for(int i = 0; i < ARRAY_SIZE; i++) {
if(abs(x[i] - y[i]) > 0.0001) { // x is device result, y is host result
correct = false;
break;
}
}
if(correct) printf("Correct!\n");
else printf("Incorrect!\n");
// ============= FREE RESOURCES ============== //
free(y);
free(x);
hipFree(d_y);
hipFree(d_x);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z12device_saxpyPfS_f
.globl _Z12device_saxpyPfS_f
.p2align 8
.type _Z12device_saxpyPfS_f,@function
_Z12device_saxpyPfS_f:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x24
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b32 s0, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[0:1], off
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v3, s0, v2
global_store_b32 v[0:1], v3, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z12device_saxpyPfS_f
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z12device_saxpyPfS_f, .Lfunc_end0-_Z12device_saxpyPfS_f
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z12device_saxpyPfS_f
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z12device_saxpyPfS_f.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <random>
#include <cmath>
#include <chrono>
#define ARRAY_SIZE 10000000
#define BLOCK_SIZE 256
__global__ void device_saxpy(float* x, float* y, const float a)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
y[i] = a * x[i] + y[i];
}
void host_saxpy(float x[], float y[], const float a)
{
for(int i = 0; i < ARRAY_SIZE; i++) {
y[i] = a * x[i] + y[i];
}
}
int main()
{
// ============= SET UP ARRAYS ============== //
std::default_random_engine rdmGen;
std::uniform_real_distribution<float> dist(0.0, 5.0);
const float a = 1.0;
float* x = (float*)malloc(ARRAY_SIZE * sizeof(float));
float* y = (float*)malloc(ARRAY_SIZE * sizeof(float));
for (int i = 0; i < ARRAY_SIZE; i++) {
x[i] = dist(rdmGen);
y[i] = dist(rdmGen);
}
// ============= START COMPUTING ON DEVICE ============== //
printf("Computing SAXPY on the GPU... ");
// Create, allocate and copy array to device
auto start = std::chrono::system_clock::now();
float* d_x = 0;
float* d_y = 0;
hipMalloc(&d_x, ARRAY_SIZE * sizeof(float));
hipMalloc(&d_y, ARRAY_SIZE * sizeof(float));
hipMemcpy(d_x, x, ARRAY_SIZE * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_y, y, ARRAY_SIZE * sizeof(float), hipMemcpyHostToDevice);
device_saxpy<<<(ARRAY_SIZE + BLOCK_SIZE - 1) / BLOCK_SIZE,
BLOCK_SIZE>>>(d_x, d_y, a);
hipDeviceSynchronize();
auto end = std::chrono::system_clock::now();
std::chrono::duration<double> host_time = (end-start) * 1000;
printf("Done in %f ms!\n\n", host_time.count());
// ============= START COMPUTING ON HOST ============== //
printf("Computing SAXPY on the CPU... ");
start = std::chrono::system_clock::now();
host_saxpy(x, y, a);
end = std::chrono::system_clock::now();
std::chrono::duration<double> device_time = (end-start) * 1000;
printf("Done in %f ms!\n\n", device_time.count());
// ============= COMPARE OUTPUTS ============== //
// Get results from device and store in d_res
hipMemcpy(x, d_y, ARRAY_SIZE * sizeof(float), hipMemcpyDeviceToHost);
printf("Comparing the output for each implementation... ");
bool correct = true;
for(int i = 0; i < ARRAY_SIZE; i++) {
if(abs(x[i] - y[i]) > 0.0001) { // x is device result, y is host result
correct = false;
break;
}
}
if(correct) printf("Correct!\n");
else printf("Incorrect!\n");
// ============= FREE RESOURCES ============== //
free(y);
free(x);
hipFree(d_y);
hipFree(d_x);
return 0;
} | .text
.file "exercise_2.hip"
.globl _Z27__device_stub__device_saxpyPfS_f # -- Begin function _Z27__device_stub__device_saxpyPfS_f
.p2align 4, 0x90
.type _Z27__device_stub__device_saxpyPfS_f,@function
_Z27__device_stub__device_saxpyPfS_f: # @_Z27__device_stub__device_saxpyPfS_f
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movss %xmm0, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z12device_saxpyPfS_f, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z27__device_stub__device_saxpyPfS_f, .Lfunc_end0-_Z27__device_stub__device_saxpyPfS_f
.cfi_endproc
# -- End function
.globl _Z10host_saxpyPfS_f # -- Begin function _Z10host_saxpyPfS_f
.p2align 4, 0x90
.type _Z10host_saxpyPfS_f,@function
_Z10host_saxpyPfS_f: # @_Z10host_saxpyPfS_f
.cfi_startproc
# %bb.0:
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movss (%rdi,%rax,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
mulss %xmm0, %xmm1
addss (%rsi,%rax,4), %xmm1
movss %xmm1, (%rsi,%rax,4)
incq %rax
cmpq $10000000, %rax # imm = 0x989680
jne .LBB1_1
# %bb.2:
retq
.Lfunc_end1:
.size _Z10host_saxpyPfS_f, .Lfunc_end1-_Z10host_saxpyPfS_f
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI2_0:
.quad 0x41dfffffff800000 # double 2147483646
.LCPI2_5:
.quad 0x41cdcd6500000000 # double 1.0E+9
.LCPI2_7:
.quad 0x3f1a36e2eb1c432d # double 1.0E-4
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0
.LCPI2_1:
.long 0x40000000 # float 2
.LCPI2_2:
.long 0x5f000000 # float 9.22337203E+18
.LCPI2_3:
.long 0x3f800000 # float 1
.LCPI2_4:
.long 0x40a00000 # float 5
.LCPI2_8:
.long 0x00000000 # float 0
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0
.LCPI2_6:
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $192, %rsp
.cfi_def_cfa_offset 240
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $40000000, %edi # imm = 0x2625A00
callq malloc
movq %rax, %rbx
movl $40000000, %edi # imm = 0x2625A00
callq malloc
movq %rax, %r14
movl $1, %r15d
xorl %r12d, %r12d
movabsq $8589934597, %r13 # imm = 0x200000005
.p2align 4, 0x90
.LBB2_1: # =>This Loop Header: Depth=1
# Child Loop BB2_2 Depth 2
# Child Loop BB2_9 Depth 2
fldl .LCPI2_0(%rip)
fstpt (%rsp)
callq logl
fstpt 68(%rsp) # 10-byte Folded Spill
flds .LCPI2_1(%rip)
fstpt (%rsp)
callq logl
fldt 68(%rsp) # 10-byte Folded Reload
fdivp %st, %st(1)
flds .LCPI2_2(%rip)
xorl %ecx, %ecx
fxch %st(1)
fucomi %st(1), %st
fldz
fcmovnb %st(2), %st
fstp %st(2)
fsubp %st, %st(1)
setae %cl
fnstcw 26(%rsp)
movzwl 26(%rsp), %eax
orl $3072, %eax # imm = 0xC00
movw %ax, 30(%rsp)
fldcw 30(%rsp)
fistpll 88(%rsp)
fldcw 26(%rsp)
shlq $63, %rcx
xorq 88(%rsp), %rcx
leaq 23(%rcx), %rax
xorl %edx, %edx
divq %rcx
movq %rax, %rcx
cmpq $1, %rax
adcq $0, %rcx
movss .LCPI2_3(%rip), %xmm3 # xmm3 = mem[0],zero,zero,zero
movaps %xmm3, %xmm1
xorps %xmm0, %xmm0
jmp .LBB2_2
.p2align 4, 0x90
.LBB2_4: # in Loop: Header=BB2_2 Depth=2
xorps %xmm2, %xmm2
cvtsi2ss %rdx, %xmm2
.LBB2_5: # in Loop: Header=BB2_2 Depth=2
addq %rax, %r15
mulss %xmm1, %xmm2
movss %xmm1, 60(%rsp)
flds 60(%rsp)
fmull .LCPI2_0(%rip)
addss %xmm2, %xmm0
fstps 56(%rsp)
movss 56(%rsp), %xmm1 # xmm1 = mem[0],zero,zero,zero
decq %rcx
je .LBB2_6
.LBB2_2: # Parent Loop BB2_1 Depth=1
# => This Inner Loop Header: Depth=2
imulq $16807, %r15, %r15 # imm = 0x41A7
movq %r15, %rax
mulq %r13
movq %r15, %rax
subq %rdx, %rax
shrq %rax
addq %rdx, %rax
shrq $30, %rax
movq %rax, %rdx
shlq $31, %rdx
subq %rdx, %rax
leaq (%r15,%rax), %rdx
decq %rdx
testq %rdx, %rdx
jns .LBB2_4
# %bb.3: # in Loop: Header=BB2_2 Depth=2
movq %rdx, %rsi
shrq %rsi
andl $1, %edx
orq %rsi, %rdx
xorps %xmm2, %xmm2
cvtsi2ss %rdx, %xmm2
addss %xmm2, %xmm2
jmp .LBB2_5
.p2align 4, 0x90
.LBB2_6: # in Loop: Header=BB2_1 Depth=1
divss %xmm1, %xmm0
ucomiss %xmm3, %xmm0
jae .LBB2_7
.LBB2_8: # %_ZNSt25uniform_real_distributionIfEclISt26linear_congruential_engineImLm16807ELm0ELm2147483647EEEEfRT_.exit
# in Loop: Header=BB2_1 Depth=1
mulss .LCPI2_4(%rip), %xmm0
xorps %xmm1, %xmm1
addss %xmm1, %xmm0
movss %xmm0, (%rbx,%r12,4)
fldl .LCPI2_0(%rip)
fstpt (%rsp)
callq logl
fstpt 68(%rsp) # 10-byte Folded Spill
flds .LCPI2_1(%rip)
fstpt (%rsp)
callq logl
xorps %xmm0, %xmm0
fldt 68(%rsp) # 10-byte Folded Reload
fdivp %st, %st(1)
flds .LCPI2_2(%rip)
xorl %ecx, %ecx
fxch %st(1)
fucomi %st(1), %st
fldz
fcmovnb %st(2), %st
fstp %st(2)
fsubp %st, %st(1)
setae %cl
fnstcw 24(%rsp)
movzwl 24(%rsp), %eax
orl $3072, %eax # imm = 0xC00
movw %ax, 28(%rsp)
fldcw 28(%rsp)
fistpll 80(%rsp)
fldcw 24(%rsp)
shlq $63, %rcx
xorq 80(%rsp), %rcx
leaq 23(%rcx), %rax
xorl %edx, %edx
divq %rcx
movq %rax, %rcx
cmpq $1, %rax
adcq $0, %rcx
movss .LCPI2_3(%rip), %xmm2 # xmm2 = mem[0],zero,zero,zero
movaps %xmm2, %xmm3
jmp .LBB2_9
.p2align 4, 0x90
.LBB2_11: # in Loop: Header=BB2_9 Depth=2
xorps %xmm1, %xmm1
cvtsi2ss %rdx, %xmm1
.LBB2_12: # in Loop: Header=BB2_9 Depth=2
addq %rax, %r15
mulss %xmm3, %xmm1
movss %xmm3, 52(%rsp)
flds 52(%rsp)
fmull .LCPI2_0(%rip)
addss %xmm1, %xmm0
fstps 48(%rsp)
movss 48(%rsp), %xmm3 # xmm3 = mem[0],zero,zero,zero
decq %rcx
je .LBB2_13
.LBB2_9: # Parent Loop BB2_1 Depth=1
# => This Inner Loop Header: Depth=2
imulq $16807, %r15, %r15 # imm = 0x41A7
movq %r15, %rax
mulq %r13
movq %r15, %rax
subq %rdx, %rax
shrq %rax
addq %rdx, %rax
shrq $30, %rax
movq %rax, %rdx
shlq $31, %rdx
subq %rdx, %rax
leaq (%r15,%rax), %rdx
decq %rdx
testq %rdx, %rdx
jns .LBB2_11
# %bb.10: # in Loop: Header=BB2_9 Depth=2
movq %rdx, %rsi
shrq %rsi
andl $1, %edx
orq %rsi, %rdx
xorps %xmm1, %xmm1
cvtsi2ss %rdx, %xmm1
addss %xmm1, %xmm1
jmp .LBB2_12
.p2align 4, 0x90
.LBB2_13: # in Loop: Header=BB2_1 Depth=1
divss %xmm3, %xmm0
ucomiss %xmm2, %xmm0
jae .LBB2_14
.LBB2_15: # %_ZNSt25uniform_real_distributionIfEclISt26linear_congruential_engineImLm16807ELm0ELm2147483647EEEEfRT_.exit34
# in Loop: Header=BB2_1 Depth=1
mulss .LCPI2_4(%rip), %xmm0
addss .LCPI2_8(%rip), %xmm0
movss %xmm0, (%r14,%r12,4)
incq %r12
cmpq $10000000, %r12 # imm = 0x989680
jne .LBB2_1
jmp .LBB2_16
.LBB2_7: # in Loop: Header=BB2_1 Depth=1
xorps %xmm1, %xmm1
movss .LCPI2_3(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
callq nextafterf
jmp .LBB2_8
.LBB2_14: # in Loop: Header=BB2_1 Depth=1
xorps %xmm1, %xmm1
movss .LCPI2_3(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
callq nextafterf
jmp .LBB2_15
.LBB2_16:
movl $.L.str, %edi
xorl %eax, %eax
callq printf
callq _ZNSt6chrono3_V212system_clock3nowEv
movq %rax, %r15
movq $0, 40(%rsp)
movq $0, 32(%rsp)
leaq 40(%rsp), %rdi
movl $40000000, %esi # imm = 0x2625A00
callq hipMalloc
leaq 32(%rsp), %rdi
movl $40000000, %esi # imm = 0x2625A00
callq hipMalloc
movq 40(%rsp), %rdi
movl $40000000, %edx # imm = 0x2625A00
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 32(%rsp), %rdi
movl $40000000, %edx # imm = 0x2625A00
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movabsq $4294967552, %rdx # imm = 0x100000100
leaq 38807(%rdx), %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_18
# %bb.17:
movq 40(%rsp), %rax
movq 32(%rsp), %rcx
movq %rax, 152(%rsp)
movq %rcx, 144(%rsp)
movl $1065353216, 64(%rsp) # imm = 0x3F800000
leaq 152(%rsp), %rax
movq %rax, 160(%rsp)
leaq 144(%rsp), %rax
movq %rax, 168(%rsp)
leaq 64(%rsp), %rax
movq %rax, 176(%rsp)
leaq 128(%rsp), %rdi
leaq 112(%rsp), %rsi
leaq 104(%rsp), %rdx
leaq 96(%rsp), %rcx
callq __hipPopCallConfiguration
movq 104(%rsp), %rax
movq 96(%rsp), %rdi
movq 128(%rsp), %rsi
movl 136(%rsp), %edx
movq 112(%rsp), %rcx
movl 120(%rsp), %r8d
movq %rdi, 8(%rsp)
movq %rax, (%rsp)
leaq 160(%rsp), %r9
movl $_Z12device_saxpyPfS_f, %edi
callq hipLaunchKernel
.LBB2_18:
callq hipDeviceSynchronize
callq _ZNSt6chrono3_V212system_clock3nowEv
subq %r15, %rax
imulq $1000, %rax, %rax # imm = 0x3E8
xorps %xmm0, %xmm0
cvtsi2sd %rax, %xmm0
divsd .LCPI2_5(%rip), %xmm0
movl $.L.str.1, %edi
movb $1, %al
callq printf
xorl %r12d, %r12d
movl $.L.str.2, %edi
xorl %eax, %eax
callq printf
callq _ZNSt6chrono3_V212system_clock3nowEv
movq %rax, %r15
.p2align 4, 0x90
.LBB2_19: # =>This Inner Loop Header: Depth=1
movss (%rbx,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
addss (%r14,%r12,4), %xmm0
movss %xmm0, (%r14,%r12,4)
incq %r12
cmpq $10000000, %r12 # imm = 0x989680
jne .LBB2_19
# %bb.20: # %_Z10host_saxpyPfS_f.exit
callq _ZNSt6chrono3_V212system_clock3nowEv
subq %r15, %rax
imulq $1000, %rax, %rax # imm = 0x3E8
xorps %xmm0, %xmm0
cvtsi2sd %rax, %xmm0
divsd .LCPI2_5(%rip), %xmm0
movl $.L.str.1, %edi
movb $1, %al
callq printf
movq 32(%rsp), %rsi
movl $40000000, %edx # imm = 0x2625A00
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
xorl %r15d, %r15d
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
movaps .LCPI2_6(%rip), %xmm0 # xmm0 = [NaN,NaN,NaN,NaN]
movsd .LCPI2_7(%rip), %xmm1 # xmm1 = mem[0],zero
.p2align 4, 0x90
.LBB2_21: # =>This Inner Loop Header: Depth=1
movss (%rbx,%r15,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
subss (%r14,%r15,4), %xmm2
andps %xmm0, %xmm2
cvtss2sd %xmm2, %xmm2
ucomisd %xmm1, %xmm2
ja .LBB2_24
# %bb.22: # in Loop: Header=BB2_21 Depth=1
incq %r15
cmpq $10000000, %r15 # imm = 0x989680
jne .LBB2_21
# %bb.23:
movl $.Lstr.1, %edi
jmp .LBB2_25
.LBB2_24:
movl $.Lstr, %edi
.LBB2_25: # %.critedge
callq puts@PLT
movq %r14, %rdi
callq free
movq %rbx, %rdi
callq free
movq 32(%rsp), %rdi
callq hipFree
movq 40(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $192, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12device_saxpyPfS_f, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z12device_saxpyPfS_f,@object # @_Z12device_saxpyPfS_f
.section .rodata,"a",@progbits
.globl _Z12device_saxpyPfS_f
.p2align 3, 0x0
_Z12device_saxpyPfS_f:
.quad _Z27__device_stub__device_saxpyPfS_f
.size _Z12device_saxpyPfS_f, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Computing SAXPY on the GPU... "
.size .L.str, 31
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Done in %f ms!\n\n"
.size .L.str.1, 17
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Computing SAXPY on the CPU... "
.size .L.str.2, 31
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Comparing the output for each implementation... "
.size .L.str.3, 49
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z12device_saxpyPfS_f"
.size .L__unnamed_1, 22
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Incorrect!"
.size .Lstr, 11
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "Correct!"
.size .Lstr.1, 9
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z27__device_stub__device_saxpyPfS_f
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z12device_saxpyPfS_f
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z12device_saxpyPfS_f
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R4, R4, c[0x0][0x0], R3 ; /* 0x0000000004047a24 */
/* 0x001fca00078e0203 */
/*0060*/ IMAD.WIDE R2, R4, R5, c[0x0][0x160] ; /* 0x0000580004027625 */
/* 0x000fc800078e0205 */
/*0070*/ IMAD.WIDE R4, R4, R5, c[0x0][0x168] ; /* 0x00005a0004047625 */
/* 0x000fe400078e0205 */
/*0080*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0090*/ LDG.E R7, [R4.64] ; /* 0x0000000404077981 */
/* 0x000ea4000c1e1900 */
/*00a0*/ FFMA R7, R2, c[0x0][0x170], R7 ; /* 0x00005c0002077a23 */
/* 0x004fca0000000007 */
/*00b0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*00c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00d0*/ BRA 0xd0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z12device_saxpyPfS_f
.globl _Z12device_saxpyPfS_f
.p2align 8
.type _Z12device_saxpyPfS_f,@function
_Z12device_saxpyPfS_f:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x24
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b32 s0, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[0:1], off
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v3, s0, v2
global_store_b32 v[0:1], v3, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z12device_saxpyPfS_f
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z12device_saxpyPfS_f, .Lfunc_end0-_Z12device_saxpyPfS_f
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z12device_saxpyPfS_f
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z12device_saxpyPfS_f.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.